IEMAll.cpp revision ceec4276710e550e3d28840e12501f4da1e8ad7b
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * IEM - Interpreted Execution Manager - All Contexts.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * Copyright (C) 2011-2012 Oracle Corporation
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * This file is part of VirtualBox Open Source Edition (OSE), as
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * available from http://www.virtualbox.org. This file is free software;
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * you can redistribute it and/or modify it under the terms of the GNU
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * General Public License (GPL) as published by the Free Software
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * Foundation, in version 2 as it comes in the "COPYING" file of the
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync/** @page pg_iem IEM - Interpreted Execution Manager
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * The interpreted execution manager (IEM) is for executing short guest code
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * sequences that are causing too many exits / virtualization traps. It will
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * also be used to interpret single instructions, thus replacing the selective
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * interpreters in EM and IOM.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * Design goals:
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * - Relatively small footprint, although we favour speed and correctness
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * over size.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * - Reasonably fast.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * - Correctly handle lock prefixed instructions.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * - Complete instruction set - eventually.
c8bcebedf264bc1287bcce50bdf66d08e28a88dcvboxsync * - Refactorable into a recompiler, maybe.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * - Replace EMInterpret*.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * Using the existing disassembler has been considered, however this is thought
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * to conflict with speed as the disassembler chews things a bit too much while
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * leaving us with a somewhat complicated state to interpret afterwards.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * The current code is very much work in progress. You've been warned!
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @section sec_iem_fpu_instr FPU Instructions
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * same or equivalent instructions on the host FPU. To make life easy, we also
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * let the FPU prioritize the unmasked exceptions for us. This however, only
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * for FPU exception delivery, because with CR0.NE=0 there is a window where we
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * can trigger spurious FPU exceptions.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * The guest FPU state is not loaded into the host CPU and kept there till we
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * leave IEM because the calling conventions have declared an all year open
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * season on much of the FPU state. For instance an innocent looking call to
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * memcpy might end up using a whole bunch of XMM or MM registers if the
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * particular implementation finds it worthwhile.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @section sec_iem_logging Logging
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * The IEM code uses the \"IEM\" log group for the main logging. The different
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * logging levels/flags are generally used for the following purposes:
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * - Flow (LogFlow): Additional exception details, basic enter/exit IEM
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * state info.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * - Level 2 (Log2): ?
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * - Level 3 (Log3): More detailed enter/exit IEM state info.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * - Level 4 (Log4): Decoding mnemonics w/ EIP.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * - Level 5 (Log5): Decoding details.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/*******************************************************************************
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync* Header Files *
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync*******************************************************************************/
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/*******************************************************************************
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync* Structures and Typedefs *
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync*******************************************************************************/
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** @typedef PFNIEMOP
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Pointer to an opcode decoder function.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** @def FNIEMOP_DEF
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * Define an opcode decoder function.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * We're using macros for this so that adding and removing parameters as well as
c6829e595cb9bb58ee792563fcb57e961ad0a304vboxsync * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @param a_Name The function name.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsynctypedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name (PIEMCPU pIemCpu)
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
c785dbab313731d1f4662b4684c0808cc14703dbvboxsynctypedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync /** The legacy view. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync /** The long mode view. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Pointer to a selector descriptor table entry. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/*******************************************************************************
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync* Defined Constants And Macros *
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync*******************************************************************************/
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** @name IEM status codes.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Not quite sure how this will play out in the end, just aliasing safe status
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * codes for now.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Temporary hack to disable the double execution. Will be removed in favor
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * of a dedicated execution mode in EM. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync//#define IEM_VERIFICATION_MODE_NO_REM
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * due to GCC lacking knowledge about the value range of a switch. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Call an opcode decoder function.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * We're using macros for this so that adding and removing parameters can be
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * done as we please. See FNIEMOP_DEF.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Call a common opcode decoder function taking one extra argument.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * We're using macros for this so that adding and removing parameters can be
24b9d11a24f96f5da0351475e0b6486ec4cb0d30vboxsync * done as we please. See FNIEMOP_DEF_1.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Call a common opcode decoder function taking one extra argument.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * We're using macros for this so that adding and removing parameters can be
24b9d11a24f96f5da0351475e0b6486ec4cb0d30vboxsync * done as we please. See FNIEMOP_DEF_1.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
24b9d11a24f96f5da0351475e0b6486ec4cb0d30vboxsync * Check if we're currently executing in real or virtual 8086 mode.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @returns @c true if it is, @c false if not.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @param a_pIemCpu The IEM state of the current CPU.
24b9d11a24f96f5da0351475e0b6486ec4cb0d30vboxsync#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Check if we're currently executing in long mode.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @returns @c true if it is, @c false if not.
24b9d11a24f96f5da0351475e0b6486ec4cb0d30vboxsync * @param a_pIemCpu The IEM state of the current CPU.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
24b9d11a24f96f5da0351475e0b6486ec4cb0d30vboxsync * Check if we're currently executing in real mode.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @returns @c true if it is, @c false if not.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @param a_pIemCpu The IEM state of the current CPU.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Tests if an AMD CPUID feature (extended) is marked present - ECX.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Tests if an AMD CPUID feature (extended) is marked present - EDX.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Tests if at least one of the specified AMD CPUID features (extended) are
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * marked present.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Checks if an Intel CPUID feature is present.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
24b9d11a24f96f5da0351475e0b6486ec4cb0d30vboxsync || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Check if the address is canonical.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/*******************************************************************************
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync* Global Variables *
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync*******************************************************************************/
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncextern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the ADD instruction. */
24b9d11a24f96f5da0351475e0b6486ec4cb0d30vboxsync/** Function table for the ADC instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the SUB instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the SBB instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the OR instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the XOR instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the AND instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the CMP instruction.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @remarks Making operand order ASSUMPTIONS.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the TEST instruction.
24b9d11a24f96f5da0351475e0b6486ec4cb0d30vboxsync * @remarks Making operand order ASSUMPTIONS.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the BT instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the BTC instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the BTR instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the BTS instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the BSF instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the BSR instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the IMUL instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Group 1 /r lookup table. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the INC instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the DEC instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the NEG instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the NOT instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the ROL instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the ROR instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the RCL instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the RCR instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the SHL instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the SHR instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the SAR instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the MUL instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the IMUL instruction working implicitly on rAX. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the DIV instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the MUL instruction. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the SHLD instruction */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/** Function table for the SHRD instruction */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/*******************************************************************************
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync* Internal Functions *
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync*******************************************************************************/
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Initializes the decoder state.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @param pIemCpu The per CPU IEM state.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync pIemCpu->uCpl = CPUMGetGuestCPL(IEMCPU_TO_VMCPU(pIemCpu), CPUMCTX2CORE(pCtx));
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync : pCtx->csHid.Attr.n.u1DefBig /** @todo check if this is correct... */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Prefetch opcodes the first time when starting executing.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @returns Strict VBox status code.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @param pIemCpu The IEM state.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu)
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * First translate CS:rIP to a physical address.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync Assert(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync cbToTryRead = pCtx->csHid.u32Limit - GCPtrPC32 + 1;
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync /** @todo Check reserved bits and such stuff. PGM is better at doing
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * that, so do it when implementing the guest virtual address
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * TLB... */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Optimistic optimization: Use unconsumed opcode bytes from the previous
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * instruction.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync /** @todo optimize this differently by not using PGMPhysRead. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Read the bytes at this address.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync /** @todo patch manager */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - read error - rc=%Rrc\n", GCPtrPC, rc));
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * exception if it fails.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @returns Strict VBox status code.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @param pIemCpu The IEM state.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @param cbMin Where to return the opcode byte.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncstatic VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * First translate CS:rIP to a physical address.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync cbToTryRead = pCtx->csHid.u32Limit - GCPtrNext32 + 1;
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync /** @todo Check reserved bits and such stuff. PGM is better at doing
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * that, so do it when implementing the guest virtual address
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * TLB... */
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Read the bytes at this address.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc\n", GCPtrNext, rc));
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @returns Strict VBox status code.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @param pIemCpu The IEM state.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @param pb Where to return the opcode byte.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * Fetches the next opcode byte.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @returns Strict VBox status code.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @param pIemCpu The IEM state.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @param pu8 Where to return the opcode byte.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
e61cd03db2217b7ec7467065af02d7ea7549149evboxsync * Fetches the next opcode byte, returns automatically on failure.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @param a_pu8 Where to return the opcode byte.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @remark Implicitly references pIemCpu.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync } while (0)
12767477bc2dbc7815e4784576a15c990f5590d3vboxsync * Fetches the next signed byte from the opcode stream.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @returns Strict VBox status code.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @param pIemCpu The IEM state.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @param pi8 Where to return the signed byte.
e61cd03db2217b7ec7467065af02d7ea7549149evboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
aeca728c901587edda5cdc79092a6432ad85d3e7vboxsync return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * Fetches the next signed byte from the opcode stream, returning automatically
e61cd03db2217b7ec7467065af02d7ea7549149evboxsync * on failure.
aeca728c901587edda5cdc79092a6432ad85d3e7vboxsync * @param pi8 Where to return the signed byte.
e61cd03db2217b7ec7467065af02d7ea7549149evboxsync * @remark Implicitly references pIemCpu.
e61cd03db2217b7ec7467065af02d7ea7549149evboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync } while (0)
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @returns Strict VBox status code.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @param pIemCpu The IEM state.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @param pu16 Where to return the opcode dword.
12767477bc2dbc7815e4784576a15c990f5590d3vboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * Fetches the next signed byte from the opcode stream, extending it to
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * unsigned 16-bit.
24b9d11a24f96f5da0351475e0b6486ec4cb0d30vboxsync * @returns Strict VBox status code.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @param pIemCpu The IEM state.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @param pu16 Where to return the unsigned word.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * Fetches the next signed byte from the opcode stream and sign-extending it to
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * a word, returning automatically on failure.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @param pu16 Where to return the word.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @remark Implicitly references pIemCpu.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync } while (0)
c6829e595cb9bb58ee792563fcb57e961ad0a304vboxsync * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @returns Strict VBox status code.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @param pIemCpu The IEM state.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @param pu16 Where to return the opcode word.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * Fetches the next opcode word.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @returns Strict VBox status code.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @param pIemCpu The IEM state.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @param pu16 Where to return the opcode word.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * Fetches the next opcode word, returns automatically on failure.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @param a_pu16 Where to return the opcode word.
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync * @remark Implicitly references pIemCpu.
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync } while (0)
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync * @returns Strict VBox status code.
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync * @param pIemCpu The IEM state.
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync * @param pu32 Where to return the opcode double word.
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync * Fetches the next opcode word, zero extending it to a double word.
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync * @returns Strict VBox status code.
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync * @param pIemCpu The IEM state.
8c46bb2a2a52fa16f6fec46ff3d5c9e570fa0b56vboxsync * @param pu32 Where to return the opcode double word.
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
8c46bb2a2a52fa16f6fec46ff3d5c9e570fa0b56vboxsync * Fetches the next opcode word and zero extends it to a double word, returns
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync * automatically on failure.
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync * @param a_pu32 Where to return the opcode double word.
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync * @remark Implicitly references pIemCpu.
62592281fc2971692c6755401b5ce1ed61b96d7cvboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync } while (0)
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @returns Strict VBox status code.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @param pIemCpu The IEM state.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * @param pu64 Where to return the opcode quad word.
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * Fetches the next opcode word, zero extending it to a quad word.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @returns Strict VBox status code.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @param pIemCpu The IEM state.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @param pu64 Where to return the opcode quad word.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Fetches the next opcode word and zero extends it to a quad word, returns
e68e2431dbeeab80792bbd9b1c64a68fc3358d0evboxsync * automatically on failure.
84c5015eccf5ba071c02e51da99b4d7593f690fevboxsync * @param a_pu64 Where to return the opcode quad word.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @remark Implicitly references pIemCpu.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync } while (0)
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * Fetches the next signed word from the opcode stream.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @returns Strict VBox status code.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @param pIemCpu The IEM state.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @param pi16 Where to return the signed word.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * Fetches the next signed word from the opcode stream, returning automatically
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * on failure.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @param pi16 Where to return the signed word.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @remark Implicitly references pIemCpu.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync } while (0)
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @returns Strict VBox status code.
24b9d11a24f96f5da0351475e0b6486ec4cb0d30vboxsync * @param pIemCpu The IEM state.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @param pu32 Where to return the opcode dword.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * Fetches the next opcode dword.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @returns Strict VBox status code.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @param pIemCpu The IEM state.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @param pu32 Where to return the opcode double word.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * Fetches the next opcode dword, returns automatically on failure.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @param a_pu32 Where to return the opcode dword.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @remark Implicitly references pIemCpu.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync } while (0)
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @returns Strict VBox status code.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @param pIemCpu The IEM state.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync * @param pu32 Where to return the opcode dword.
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * Fetches the next opcode dword, zero extending it to a quad word.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @returns Strict VBox status code.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @param pIemCpu The IEM state.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @param pu64 Where to return the opcode quad word.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
3c3a5ab35783f4d31cb5d3a15db9daadeb804daavboxsync *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * Fetches the next opcode dword and zero extends it to a quad word, returns
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * automatically on failure.
c785dbab313731d1f4662b4684c0808cc14703dbvboxsync * @param a_pu64 Where to return the opcode quad word.
return rcStrict2; \
return rcStrict2; \
*pu64 = 0;
return rcStrict;
return VINF_SUCCESS;
return rcStrict2; \
*pu64 = 0;
return rcStrict;
return VINF_SUCCESS;
return rcStrict2; \
static VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
return rcStrict;
Log(("iemMiscValidateNewSSandRsp: %#x - system selector -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
/** @todo testcase: check if the TSS.ssX RPL is checked. */
Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
return VINF_SUCCESS;
Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->trHid.u32Limit));
return VINF_SUCCESS;
Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->trHid.u32Limit));
return VINF_SUCCESS;
return rcStrict;
switch (u8Vector)
case X86_XCPT_DB:
static VBOXSTRICTRC
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
return rcStrict;
return rcStrict;
return rcStrict;
static VBOXSTRICTRC
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
return rcStrict;
Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
case X86_SEL_TYPE_SYS_LDT:
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
return rcStrict;
Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
return rcStrict;
return rcStrict;
uNewEsp - cbStackFrame + X86DESC_BASE(DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
return rcStrict;
uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
return rcStrict;
return rcStrict;
return rcStrict;
pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
return rcStrict;
uStackFrame.pu32[0] = (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
return rcStrict;
return rcStrict;
static VBOXSTRICTRC
NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
static VBOXSTRICTRC
NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
u8Vector, pCtx->cs, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
#ifdef LOG_ENABLED
if (LogIs3Enabled())
"gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
return rcStrict;
#ifdef SOME_UNUSED_FUNCTION
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
switch (rc)
case VERR_PAGE_NOT_PRESENT:
uErr = 0;
case VERR_ACCESS_DENIED:
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
case IEM_OP_PRF_SIZE_OP:
case IEM_OP_PRF_SIZE_REX_W:
AssertFailed();
"gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
RTAssertPanic(); \
return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
typedef int ignore_semicolon
RTAssertPanic(); \
return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
typedef int ignore_semicolon
return IEMOP_RAISE_INVALID_OPCODE(); \
typedef int ignore_semicolon
return IEMOP_RAISE_INVALID_OPCODE(); \
typedef int ignore_semicolon
switch (iSegReg)
switch (iSegReg)
switch (iSegReg)
switch (iReg)
pu8Reg++;
return pu8Reg;
return *pbSrc;
#ifdef RT_ARCH_AMD64
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
return VINF_SUCCESS;
return VINF_SUCCESS;
return VINF_SUCCESS;
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
return VINF_SUCCESS;
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
default: AssertFailed();
return GCPtrTop;
return GCPtrTop;
return GCPtrTop;
return GCPtrTop;
#ifdef IN_RING3
switch (iEffSeg)
static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
DECL_NO_INLINE(static, void)
DECL_NO_INLINE(static, void)
iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
DECL_NO_INLINE(static, void)
DECL_NO_INLINE(static, void)
DECL_NO_INLINE(static, void)
return VINF_SUCCESS;
return VERR_NOT_FOUND;
return VINF_SUCCESS;
return VERR_NOT_FOUND;
return VINF_SUCCESS;
return VERR_NOT_FOUND;
static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
return VINF_SUCCESS;
return VERR_NOT_FOUND;
return u16Ftw;
return u8Ftw;
static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
return VINF_SUCCESS;
static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg)
return VINF_SUCCESS;
return VINF_SUCCESS;
case IEMMODE_16BIT:
case IEMMODE_32BIT:
return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
return VINF_SUCCESS;
case IEMMODE_64BIT:
return VINF_SUCCESS;
static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
return VINF_SUCCESS;
#ifdef IEM_VERIFICATION_MODE
return VERR_PGM_PHYS_TLB_CATCH_ALL;
ppvMem);
DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem)
return VERR_NOT_FOUND;
int rc;
cbFirst);
cbSecond);
cbFirst);
cbSecond);
#ifdef IEM_VERIFICATION_MODE
if (pEvtRec)
memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
if (pEvtRec)
return rc;
VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
return rcStrict;
rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
return rcStrict;
int rc;
return rc;
return rc;
return rc;
rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
return rc;
#ifdef IEM_VERIFICATION_MODE
if (pEvtRec)
if (pEvtRec)
#ifdef VBOX_STRICT
#ifdef VBOX_STRICT
return VINF_SUCCESS;
static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
return rcMap;
int rc;
return rc;
#ifdef IEM_VERIFICATION_MODE
if (pEvtRec)
#ifdef VBOX_STRICT
#ifdef VBOX_STRICT
return VINF_SUCCESS;
static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
return rcStrict;
return rcStrict;
void *pvMem;
return VINF_SUCCESS;
if ( (pIemCpu->aMemMappings[iMemMap].fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_TYPE_WRITE))
return VINF_SUCCESS;
static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
return rc;
static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
return rc;
static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
return rc;
#ifdef SOME_UNUSED_FUNCTION
static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
*pu64Dst = 0;
return rc;
static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
return rc;
static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
return rc;
(void **)&pu8Src,
switch (enmOpSize)
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
return rcStrict;
static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
return rc;
static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
return rc;
static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
return rc;
static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
#ifdef SOME_UNUSED_FUNCTION
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rc;
VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rcStrict;
static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
return rcStrict;
static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
return rcStrict;
static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
return rc;
static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
return rc;
VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
else if ((uint32_t)(uSel & X86_SEL_MASK) + 15 < (uSel & X86_SEL_LDT ? pCtx->ldtrHid.u32Limit : pCtx->gdtr.cbGdt))
rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
return rcStrict;
return rcStrict;
return rcStrict;
#include "IEMAllCImpl.cpp.h"
#define IEM_MC_END() }
#define IEM_MC_PAUSE() do {} while (0)
#define IEM_MC_CONTINUE() do {} while (0)
return rcStrict2; \
#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
/* Fetch a general register into a wider local variable.
   _ZX_ variants zero-extend: the narrow unsigned fetch result is assigned to
   the wider destination, so the high bits become zero.
   _SX_ variants sign-extend: the fetch result is first cast to the matching
   signed type (int8_t/int16_t/int32_t) so the implicit conversion to the
   wider destination replicates the sign bit. */
#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
/* Segment selectors are 16-bit; both variants fetch via iemSRegFetchU16 and
   zero-extend into the wider destination by plain assignment. */
#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
/* Store into a general register through the pointer returned by iemGRegRef.
   Note the U32 store deliberately writes through a uint64_t pointer with the
   value cast to uint32_t: writing a 32-bit result zero-clears the high dword
   of the full 64-bit register (x86-64 architectural behavior), whereas the
   U16 store writes only the low word and leaves the rest untouched. */
#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
/* Explicitly zero the high dword of a 64-bit register (keeps the low 32 bits). */
#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
/* Obtain a typed pointer into the register for in-place read-modify-write. */
#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
/* In-place arithmetic on the low 8/16/64 bits of a register.  (A U32 variant
   is not visible in this chunk — presumably defined elsewhere with the
   high-dword-clearing semantics; confirm before relying on it.) */
#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
/* Add a register's current value into a local variable (local op= register). */
#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
/* In-place bitwise AND/OR on the register through its reference pointer. */
#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
/* Set / clear / toggle bits in the guest EFLAGS image held in the CPU context. */
#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
/* Clear the FPU status-word exception bits, preserving only the condition
   code (C0-C3) and TOP fields. */
#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
/* Write the bit pattern of a negative double-precision QNaN (sign bit set,
   all-ones exponent, quiet bit set, zero payload) into the referenced
   destination via its raw 64-bit view. */
#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
/* Tail-call a C implementation function, passing the instruction length
   (pIemCpu->offOpcode) plus 2/3/5 extra arguments.  The `return` makes the
   C-impl's strict status the result of the enclosing microcode function,
   terminating the IEM_MC block. */
#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
/* Same call, but as a plain expression (no `return`) — the caller decides
   what to do with the resulting strict status. */
#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
#define IEM_MC_FPU_STACK_INC_TOP() \
#define IEM_MC_FPU_STACK_DEC_TOP() \
#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
/* Opens an if-block taken when bit a_iBitNo of the full 64-bit register is
   set; the unbalanced `{` is intentionally closed by IEM_MC_ELSE/IEM_MC_ENDIF. */
#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
#define IEM_MC_IF_FCW_IM() \
/* Close the current IEM_MC_IF_* arm and open the else arm. */
#define IEM_MC_ELSE() } else {
/* Close the if/else; the trailing empty do-while forces a semicolon at the
   expansion site so the DSL statement reads like a normal C statement. */
#define IEM_MC_ENDIF() } do {} while (0)
#ifdef DEBUG
Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs, pIemCpu->CTX_SUFF(pCtx)->rip, \
pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
#define IEMOP_HLP_NO_LOCK_PREFIX() \
return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
#define IEMOP_HLP_NO_64BIT() \
return IEMOP_RAISE_INVALID_OPCODE(); \
#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
#define IEMOP_HLP_DONE_DECODING() \
#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
#define SET_SS_DEF() \
case IEMMODE_16BIT:
case 0: u16EffAddr = 0; break;
return VINF_SUCCESS;
case IEMMODE_32BIT:
SET_SS_DEF();
return VINF_SUCCESS;
case IEMMODE_64BIT:
SET_SS_DEF();
return VINF_SUCCESS;
#include "IEMAllInstructions.cpp.h"
while (pEvtRec)
} while (pEvtRec);
return NULL;
if (pEvtRec)
pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
if (!pEvtRec)
return NULL;
return pEvtRec;
if (!pVCpu)
if (!pEvtRec)
if (!pVCpu)
if (!pEvtRec)
if (!pVCpu)
if (!pEvtRec)
if (!pVCpu)
if (!pEvtRec)
VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
AssertFailed();
VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
AssertFailed();
static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
if (pEvtRec)
return VINF_SUCCESS;
static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
if (pEvtRec)
return VINF_SUCCESS;
"gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
case IEMVERIFYEVENT_RAM_READ:
case IEMVERIFYEVENT_RAM_WRITE:
static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
unsigned cDiffs = 0;
case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
cDiffs++; \
RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
cDiffs++; \
RTAssertMsg2Weak(" %8sHid.Attr differs - iem=%02x - rem=%02x\n", #a_Sel, pDebugCtx->a_Sel##Hid.Attr.u, pOrgCtx->a_Sel##Hid.Attr.u); \
cDiffs++; \
pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; /* only for the time being - old snapshots here. */
cDiffs++;
RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
if ((pOrgCtx->dr[7] & ~X86_DR7_MB1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_MB1_MASK)) /* REM 'mov drX,greg' bug.*/
if (cDiffs != 0)
if (LogIs3Enabled())
if (cDiffs == 0)
bool fEquals;
case IEMVERIFYEVENT_RAM_READ:
case IEMVERIFYEVENT_RAM_WRITE:
fEquals = false;
if (!fEquals)
static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
return VERR_INTERNAL_ERROR;
static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
return VERR_INTERNAL_ERROR;
#ifdef LOG_ENABLED
if (LogIs2Enabled())
szInstr));
if (LogIs3Enabled())
return rcStrict;
// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
b; IEM_OPCODE_GET_NEXT_U8(&b);
return rcStrict;
VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
switch (enmType)
case TRPM_HARDWARE_INT:
case TRPM_SOFTWARE_INT:
case TRPM_TRAP:
switch (u8TrapNo)
case X86_XCPT_DF:
case X86_XCPT_TS:
case X86_XCPT_NP:
case X86_XCPT_SS:
case X86_XCPT_PF:
case X86_XCPT_AC:
return VERR_NOT_IMPLEMENTED;
return VERR_NOT_IMPLEMENTED;