IEMAll.cpp revision ce1318f291ab810c19aa03ce38d6a87679ba3af7
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * IEM - Interpreted Execution Manager - All Contexts.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Copyright (C) 2011-2012 Oracle Corporation
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * This file is part of VirtualBox Open Source Edition (OSE), as
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * available from http://www.virtualbox.org. This file is free software;
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * you can redistribute it and/or modify it under the terms of the GNU
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * General Public License (GPL) as published by the Free Software
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Foundation, in version 2 as it comes in the "COPYING" file of the
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** @page pg_iem IEM - Interpreted Execution Manager
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * The interpreted exeuction manager (IEM) is for executing short guest code
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * sequences that are causing too many exits / virtualization traps. It will
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * also be used to interpret single instructions, thus replacing the selective
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * interpreters in EM and IOM.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Design goals:
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * - Relatively small footprint, although we favour speed and correctness
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * over size.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * - Reasonably fast.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * - Correctly handle lock prefixed instructions.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * - Complete instruction set - eventually.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * - Refactorable into a recompiler, maybe.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * - Replace EMInterpret*.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Using the existing disassembler has been considered, however this is thought
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * to conflict with speed as the disassembler chews things a bit too much while
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * leaving us with a somewhat complicated state to interpret afterwards.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * The current code is very much work in progress. You've been warned!
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * @section sec_iem_fpu_instr FPU Instructions
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * same or equivalent instructions on the host FPU. To make life easy, we also
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * let the FPU prioritize the unmasked exceptions for us. This however, only
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * works reliably when CR0.NE is set, i.e. when using \#MF instead the IRQ 13
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * for FPU exception delivery, because with CR0.NE=0 there is a window where we
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * can trigger spurious FPU exceptions.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/*******************************************************************************
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync* Header Files *
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync*******************************************************************************/
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/*******************************************************************************
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync* Structures and Typedefs *
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync*******************************************************************************/
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** @typedef PFNIEMOP
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Pointer to an opcode decoder function.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** @def FNIEMOP_DEF
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Define an opcode decoder function.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * We're using macors for this so that adding and removing parameters as well as
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * @param a_Name The function name.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsynctypedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsynctypedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync /** The legacy view. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync /** The long mode view. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Pointer to a selector descriptor table entry. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/*******************************************************************************
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync* Defined Constants And Macros *
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync*******************************************************************************/
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** @name IEM status codes.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Not quite sure how this will play out in the end, just aliasing safe status
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * codes for now.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Temporary hack to disable the double execution. Will be removed in favor
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * of a dedicated execution mode in EM. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync//#define IEM_VERIFICATION_MODE_NO_REM
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * due to GCC lacking knowledge about the value range of a switch. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Call an opcode decoder function.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * We're using macors for this so that adding and removing parameters can be
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * done as we please. See FNIEMOP_DEF.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Call a common opcode decoder function taking one extra argument.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * We're using macors for this so that adding and removing parameters can be
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * done as we please. See FNIEMOP_DEF_1.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Call a common opcode decoder function taking one extra argument.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * We're using macors for this so that adding and removing parameters can be
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * done as we please. See FNIEMOP_DEF_1.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Check if we're currently executing in real or virtual 8086 mode.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * @returns @c true if it is, @c false if not.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * @param a_pIemCpu The IEM state of the current CPU.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Check if we're currently executing in long mode.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * @returns @c true if it is, @c false if not.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * @param a_pIemCpu The IEM state of the current CPU.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Check if we're currently executing in real mode.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * @returns @c true if it is, @c false if not.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * @param a_pIemCpu The IEM state of the current CPU.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Tests if an AMD CPUID feature (extended) is marked present - ECX.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Tests if an AMD CPUID feature (extended) is marked present - EDX.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Tests if at least on of the specified AMD CPUID features (extended) are
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * marked present.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Checks if a intel CPUID feature is present.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * Check if the address is canonical.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/*******************************************************************************
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync* Global Variables *
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync*******************************************************************************/
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsyncextern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the ADD instruction. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the ADC instruction. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the SUB instruction. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the SBB instruction. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the OR instruction. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the XOR instruction. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the AND instruction. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the CMP instruction.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * @remarks Making operand order ASSUMPTIONS.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the TEST instruction.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync * @remarks Making operand order ASSUMPTIONS.
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the BT instruction. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the BTC instruction. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the BTR instruction. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the BTS instruction. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the BSF instruction. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the BSR instruction. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the IMUL instruction. */
99f33ab590a3a65e0cd082dd8d67779efb9cc6c9vboxsync/** Group 1 /r lookup table. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the INC instruction. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the DEC instruction. */
a1a825a2fcd6b32bd63d40a0705ef68fcbf1ed16vboxsync/** Function table for the NEG instruction. */
/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
static VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
static VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
#ifdef IEM_VERIFICATION_MODE
static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
#ifdef IEM_VERIFICATION_MODE
* What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
#ifdef IEM_VERIFICATION_MODE
return VINF_SUCCESS;
return rc;
return VINF_SUCCESS;
* What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
return rc;
return VINF_SUCCESS;
*pb = 0;
return rcStrict;
return VINF_SUCCESS;
return rcStrict2; \
return rcStrict2; \
return rcStrict;
return VINF_SUCCESS;
return rcStrict2; \
*pu16 = 0;
return rcStrict;
return VINF_SUCCESS;
return rcStrict2; \
*pu32 = 0;
return rcStrict;
return VINF_SUCCESS;
return rcStrict2; \
*pu64 = 0;
return rcStrict;
return VINF_SUCCESS;
return rcStrict2; \
return rcStrict2; \
*pu32 = 0;
return rcStrict;
return VINF_SUCCESS;
return rcStrict2; \
*pu64 = 0;
return rcStrict;
return VINF_SUCCESS;
return rcStrict2; \
return rcStrict2; \
*pu64 = 0;
return rcStrict;
return VINF_SUCCESS;
return rcStrict2; \
*pu64 = 0;
return rcStrict;
return VINF_SUCCESS;
return rcStrict2; \
static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
return rcStrict;
Log(("iemMiscValidateNewSSandRsp: %#x - system selector -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
/** @todo testcase: check if the TSS.ssX RPL is checked. */
Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
return VINF_SUCCESS;
Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->trHid.u32Limit));
return VINF_SUCCESS;
Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->trHid.u32Limit));
return VINF_SUCCESS;
return rcStrict;
switch (u8Vector)
case X86_XCPT_DB:
static VBOXSTRICTRC
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
return rcStrict;
return rcStrict;
return rcStrict;
static VBOXSTRICTRC
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
return rcStrict;
Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
case X86_SEL_TYPE_SYS_LDT:
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
return rcStrict;
Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
return rcStrict;
return rcStrict;
uNewEsp - cbStackFrame + X86DESC_BASE(DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
return rcStrict;
uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
return rcStrict;
return rcStrict;
return rcStrict;
pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
return rcStrict;
uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
return rcStrict;
return rcStrict;
static VBOXSTRICTRC
NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
static VBOXSTRICTRC
NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
#ifdef LOG_ENABLED
if (LogIs3Enabled())
"gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
return rcStrict;
#ifdef SOME_UNUSED_FUNCTION
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
switch (rc)
case VERR_PAGE_NOT_PRESENT:
uErr = 0;
case VERR_ACCESS_DENIED:
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
case IEM_OP_PRF_SIZE_OP:
case IEM_OP_PRF_SIZE_REX_W:
AssertFailed();
"gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
RTAssertPanic(); \
return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
typedef int ignore_semicolon
RTAssertPanic(); \
return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
typedef int ignore_semicolon
switch (iSegReg)
switch (iSegReg)
switch (iSegReg)
switch (iReg)
pu8Reg++;
return pu8Reg;
return *pbSrc;
#ifdef RT_ARCH_AMD64
return u16Fsw;
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
return VINF_SUCCESS;
return VINF_SUCCESS;
return VINF_SUCCESS;
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
return VINF_SUCCESS;
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
default: AssertFailed();
return GCPtrTop;
return GCPtrTop;
return GCPtrTop;
return GCPtrTop;
#ifdef IN_RING3
switch (iEffSeg)
static void iemFpuPushResult(PIEMCPU pIemCpu, PIEMFPURESULT pResult, PCPUMCTX pCtx, uint16_t iDstReg,
uint16_t fXcpts = (pResult->FSW & (X86_FSW_IE | X86_FSW_DE | X86_FSW_ZE | X86_FSW_OE | X86_FSW_UE | X86_FSW_PE))
if (!fXcpts)
pCtx->fpu.FSW |= (iNewTop << X86_FSW_TOP_SHIFT) | (pResult->FSW & ~(X86_FSW_TOP_MASK | X86_FSW_B | X86_FSW_ES));
AssertFailed();
uint16_t fXcpts = (pResult->FSW & (X86_FSW_IE | X86_FSW_DE | X86_FSW_ZE | X86_FSW_OE | X86_FSW_UE | X86_FSW_PE))
if (!fXcpts)
AssertFailed();
uint16_t fXcpts = (pResult->FSW & (X86_FSW_IE | X86_FSW_DE | X86_FSW_ZE | X86_FSW_OE | X86_FSW_UE | X86_FSW_PE))
if (!fXcpts)
AssertFailed();
static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
DECL_NO_INLINE(static, void)
DECL_NO_INLINE(static, void)
iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
return VINF_SUCCESS;
return VERR_NOT_FOUND;
return VINF_SUCCESS;
return VERR_NOT_FOUND;
static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
return VINF_SUCCESS;
static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
return VINF_SUCCESS;
return VINF_SUCCESS;
case IEMMODE_16BIT:
case IEMMODE_32BIT:
return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
return VINF_SUCCESS;
case IEMMODE_64BIT:
return VINF_SUCCESS;
static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
return VINF_SUCCESS;
#ifdef IEM_VERIFICATION_MODE
return VERR_PGM_PHYS_TLB_CATCH_ALL;
ppvMem);
DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem)
return VERR_NOT_FOUND;
int rc;
cbFirst);
cbSecond);
cbFirst);
cbSecond);
#ifdef IEM_VERIFICATION_MODE
if (pEvtRec)
memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
if (pEvtRec)
return rc;
VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
return rcStrict;
rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
return rcStrict;
int rc;
return rc;
return rc;
return rc;
rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
return rc;
#ifdef IEM_VERIFICATION_MODE
if (pEvtRec)
if (pEvtRec)
#ifdef VBOX_STRICT
#ifdef VBOX_STRICT
return VINF_SUCCESS;
static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
return rcMap;
int rc;
return rc;
#ifdef IEM_VERIFICATION_MODE
if (pEvtRec)
#ifdef VBOX_STRICT
#ifdef VBOX_STRICT
return VINF_SUCCESS;
static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
return rcStrict;
return rcStrict;
void *pvMem;
return VINF_SUCCESS;
if ( (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
return VINF_SUCCESS;
static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
return rc;
static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
return rc;
static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
return rc;
#ifdef SOME_UNUSED_FUNCTION
static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
*pu64Dst = 0;
return rc;
static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
return rc;
(void **)&pu8Src,
switch (enmOpSize)
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
return rcStrict;
static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
return rc;
static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
return rc;
static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
return rc;
static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
#ifdef SOME_UNUSED_FUNCTION
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rc;
VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rcStrict;
static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
return rcStrict;
static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
return rcStrict;
static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
return rc;
static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
return rc;
VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtrHid.u32Limit : pCtx->gdtr.cbGdt))
rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
return rcStrict;
return rcStrict;
return rcStrict;
#include "IEMAllCImpl.cpp.h"
/** Closes the compound statement opened by the IEM_MC_BEGIN macro (not
 *  visible in this chunk), ending a microcode block. */
#define IEM_MC_END() }
/** No-op in this C implementation; NOTE(review): presumably a hook point for
 *  alternative IEM back-ends - confirm against the macro's declaration site. */
#define IEM_MC_PAUSE() do {} while (0)
/** No-op in this C implementation; same remark as IEM_MC_PAUSE. */
#define IEM_MC_CONTINUE() do {} while (0)
return rcStrict2; \
/** Relative jump with a signed 16-bit displacement; returns from the
 *  enclosing function on failure (via IEM_MC_RETURN_ON_FAILURE). */
#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
/** Relative jump with a signed 32-bit displacement; returns on failure. */
#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
/** Absolute jump: sets RIP to a 16-bit value; returns on failure. */
#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
/** Absolute jump: sets RIP to a 32-bit value; returns on failure. */
#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
/** Absolute jump: sets RIP to a 64-bit value; returns on failure. */
#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
/* Zero-extending register fetches: the unsigned fetch result is assigned
   directly to the wider destination, so the upper bits become zero. */
#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
/* Sign-extending register fetches: the fetched value is first cast to the
   signed type of the same width, so the implicit conversion to the wider
   destination sign-extends it. */
#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
/* Segment register fetches: the 16-bit selector value is zero-extended into
   the wider destination. */
#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
/** Stores a 16-bit value into a general register; only two bytes are written
 *  through the narrowed pointer, leaving the rest of the 64-bit register
 *  untouched.  NOTE(review): placing the low word at the register's base
 *  address assumes a little-endian host layout - confirm. */
#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
/** Stores a 32-bit value into a general register.  Deliberately writes all
 *  64 bits (value cast down to uint32_t first) so the high half is zeroed,
 *  matching the 64-bit-mode semantics of 32-bit operand writes. */
#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
/** Stores a 64-bit value into a general register. */
#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
/** Clears the high 32 bits of a general register (mask with UINT32_MAX). */
#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
/* References (pointers) to general registers for in-place modification by
   the caller. */
#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
/* In-place arithmetic on general registers.  The 8/16-bit forms read-modify-
   write only one/two bytes through a narrowed pointer, so the remaining bits
   of the register are preserved (as the CPU does for 8/16-bit operands).
   NOTE(review): this addressing assumes a little-endian register layout. */
#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
/* Adds a general register's value to a local variable (note the operand
   order: the local is the destination). */
#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
/* In-place bitwise AND/OR on general registers; same narrowed-pointer
   remarks as the arithmetic forms above. */
#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
/** Sets the given bit(s) in the guest EFLAGS register. */
#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
/** Clears the given bit(s) in the guest EFLAGS register. */
#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
/** Toggles the given bit(s) in the guest EFLAGS register. */
#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
/* Call a C instruction implementation and return its strict status code from
   the enclosing function.  pIemCpu->offOpcode is passed as the second
   argument - presumably the decoded instruction length; verify against the
   PFNIEMCIMPL signature. */
#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
/* Like the CALL variants, but expand to the bare call expression so the
   surrounding code decides what to do with the returned status. */
#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
/** Opens an if-block taken when bit a_iBitNo is set in the full 64-bit value
 *  of the given general register; must be closed with IEM_MC_ENDIF. */
#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
if (iemFpu2StRegsNonEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
/** The else-branch of an IEM_MC_IF_XXX block. */
#define IEM_MC_ELSE() } else {
/** Closes an IEM_MC_IF_XXX (or ELSE) block; the trailing empty do-while
 *  consumes the semicolon written after the macro invocation. */
#define IEM_MC_ENDIF() } do {} while (0)
#ifdef DEBUG
Log2(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
Log2(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
#define IEMOP_HLP_NO_LOCK_PREFIX() \
return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
#define IEMOP_HLP_NO_64BIT() \
return IEMOP_RAISE_INVALID_OPCODE(); \
#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
#define SET_SS_DEF() \
case IEMMODE_16BIT:
case 0: u16EffAddr = 0; break;
return VINF_SUCCESS;
case IEMMODE_32BIT:
SET_SS_DEF();
return VINF_SUCCESS;
case IEMMODE_64BIT:
SET_SS_DEF();
return VINF_SUCCESS;
#include "IEMAllInstructions.cpp.h"
while (pEvtRec)
} while (pEvtRec);
return NULL;
if (pEvtRec)
pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
if (!pEvtRec)
return NULL;
return pEvtRec;
if (!pVCpu)
if (!pEvtRec)
if (!pVCpu)
if (!pEvtRec)
if (!pVCpu)
if (!pEvtRec)
if (!pVCpu)
if (!pEvtRec)
VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
AssertFailed();
VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
AssertFailed();
static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
if (pEvtRec)
return VINF_SUCCESS;
static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
if (pEvtRec)
return VINF_SUCCESS;
"gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
case IEMVERIFYEVENT_RAM_READ:
case IEMVERIFYEVENT_RAM_WRITE:
static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
unsigned cDiffs = 0;
case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
cDiffs++; \
RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
cDiffs++; \
RTAssertMsg2Weak(" %8sHid.Attr differs - iem=%02x - rem=%02x\n", #a_Sel, pDebugCtx->a_Sel##Hid.Attr.u, pOrgCtx->a_Sel##Hid.Attr.u); \
cDiffs++; \
pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; /* only for the time being - old snapshots here. */
cDiffs++;
RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
if ((pOrgCtx->dr[7] & ~X86_DR7_MB1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_MB1_MASK)) /* REM 'mov drX,greg' bug.*/
if (cDiffs != 0)
if (LogIs3Enabled())
if (cDiffs == 0)
bool fEquals;
case IEMVERIFYEVENT_RAM_READ:
case IEMVERIFYEVENT_RAM_WRITE:
fEquals = false;
if (!fEquals)
static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
return VERR_INTERNAL_ERROR;
static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
return VERR_INTERNAL_ERROR;
#ifdef LOG_ENABLED
if (LogIs2Enabled())
szInstr));
if (LogIs3Enabled())
return rcStrict;
// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
b; IEM_OPCODE_GET_NEXT_U8(&b);
return rcStrict;
VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
switch (enmType)
case TRPM_HARDWARE_INT:
case TRPM_SOFTWARE_INT:
case TRPM_TRAP:
switch (u8TrapNo)
case X86_XCPT_DF:
case X86_XCPT_TS:
case X86_XCPT_NP:
case X86_XCPT_SS:
case X86_XCPT_PF:
case X86_XCPT_AC: