IEMAll.cpp revision 3e289133c2cd4a14140904eb6009390fc2de8836
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * IEM - Interpreted Execution Manager - All Contexts.
2f0e6b8d79026c7efad10a34945eb120097d7f54vboxsync * Copyright (C) 2011-2013 Oracle Corporation
cf5f6bf2704d4fff443139e10bccc6a0a7fa4b85vboxsync * This file is part of VirtualBox Open Source Edition (OSE), as
cf5f6bf2704d4fff443139e10bccc6a0a7fa4b85vboxsync * available from http://www.virtualbox.org. This file is free software;
cf5f6bf2704d4fff443139e10bccc6a0a7fa4b85vboxsync * you can redistribute it and/or modify it under the terms of the GNU
cf5f6bf2704d4fff443139e10bccc6a0a7fa4b85vboxsync * General Public License (GPL) as published by the Free Software
cf5f6bf2704d4fff443139e10bccc6a0a7fa4b85vboxsync * Foundation, in version 2 as it comes in the "COPYING" file of the
cf5f6bf2704d4fff443139e10bccc6a0a7fa4b85vboxsync * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
cf5f6bf2704d4fff443139e10bccc6a0a7fa4b85vboxsync * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
2f0e6b8d79026c7efad10a34945eb120097d7f54vboxsync/** @page pg_iem IEM - Interpreted Execution Manager
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * The interpreted execution manager (IEM) is for executing short guest code
43747b1f0bc8302a238fb35e55857a5e9aa1933dvboxsync * sequences that are causing too many exits / virtualization traps. It will
43747b1f0bc8302a238fb35e55857a5e9aa1933dvboxsync * also be used to interpret single instructions, thus replacing the selective
43747b1f0bc8302a238fb35e55857a5e9aa1933dvboxsync * interpreters in EM and IOM.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Design goals:
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * - Relatively small footprint, although we favour speed and correctness
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * over size.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * - Reasonably fast.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * - Correctly handle lock prefixed instructions.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * - Complete instruction set - eventually.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * - Refactorable into a recompiler, maybe.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * - Replace EMInterpret*.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Using the existing disassembler has been considered, however this is thought
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * to conflict with speed as the disassembler chews things a bit too much while
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * leaving us with a somewhat complicated state to interpret afterwards.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * The current code is very much work in progress. You've been warned!
2f0e6b8d79026c7efad10a34945eb120097d7f54vboxsync * @section sec_iem_fpu_instr FPU Instructions
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
2f0e6b8d79026c7efad10a34945eb120097d7f54vboxsync * same or equivalent instructions on the host FPU. To make life easy, we also
2f0e6b8d79026c7efad10a34945eb120097d7f54vboxsync * let the FPU prioritize the unmasked exceptions for us. This however, only
2f0e6b8d79026c7efad10a34945eb120097d7f54vboxsync * works reliably when CR0.NE is set, i.e. when using \#MF instead of the IRQ 13
aef51041e0fe31e8ea903dd7e67fe12cef645654vboxsync * for FPU exception delivery, because with CR0.NE=0 there is a window where we
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * can trigger spurious FPU exceptions.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * The guest FPU state is not loaded into the host CPU and kept there till we
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * leave IEM because the calling conventions have declared an all year open
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * season on much of the FPU state. For instance an innocent looking call to
aef51041e0fe31e8ea903dd7e67fe12cef645654vboxsync * memcpy might end up using a whole bunch of XMM or MM registers if the
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * particular implementation finds it worthwhile.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @section sec_iem_logging Logging
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * The IEM code uses the \"IEM\" log group for the main logging. The different
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * logging levels/flags are generally used for the following purposes:
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
79baed6836ae36c5f15b182292387484dcf7a752vboxsync * - Flow (LogFlow): Basic enter/exit IEM state info.
79baed6836ae36c5f15b182292387484dcf7a752vboxsync * - Level 2 (Log2): ?
79baed6836ae36c5f15b182292387484dcf7a752vboxsync * - Level 3 (Log3): More detailed enter/exit IEM state info.
79baed6836ae36c5f15b182292387484dcf7a752vboxsync * - Level 4 (Log4): Decoding mnemonics w/ EIP.
79baed6836ae36c5f15b182292387484dcf7a752vboxsync * - Level 5 (Log5): Decoding details.
79baed6836ae36c5f15b182292387484dcf7a752vboxsync * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
79baed6836ae36c5f15b182292387484dcf7a752vboxsync/** @def IEM_VERIFICATION_MODE_MINIMAL
79baed6836ae36c5f15b182292387484dcf7a752vboxsync * Use for pitting IEM against EM or something else in ring-0 or raw-mode
79baed6836ae36c5f15b182292387484dcf7a752vboxsync * context. */
79baed6836ae36c5f15b182292387484dcf7a752vboxsync//#define IEM_VERIFICATION_MODE_MINIMAL
79baed6836ae36c5f15b182292387484dcf7a752vboxsync//#define IEM_LOG_MEMORY_WRITES
aef51041e0fe31e8ea903dd7e67fe12cef645654vboxsync/*******************************************************************************
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync* Header Files *
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync*******************************************************************************/
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/*******************************************************************************
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync* Structures and Typedefs *
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync*******************************************************************************/
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** @typedef PFNIEMOP
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Pointer to an opcode decoder function.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** @def FNIEMOP_DEF
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Define an opcode decoder function.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * We're using macros for this so that adding and removing parameters as well as
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param a_Name The function name.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsynctypedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PIEMCPU pIemCpu);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync static VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsynctypedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PIEMCPU pIemCpu);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu) RT_NO_THROW
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync static /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync static VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu) RT_NO_THROW
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0) RT_NO_THROW
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync static VBOXSTRICTRC a_Name(PIEMCPU pIemCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /** The legacy view. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /** The long mode view. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Pointer to a selector descriptor table entry. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/*******************************************************************************
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync* Defined Constants And Macros *
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync*******************************************************************************/
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** @name IEM status codes.
178b942cb42a13f3bca0f99a1bca1311ae190ffdvboxsync * Not quite sure how this will play out in the end, just aliasing safe status
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * codes for now.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Temporary hack to disable the double execution. Will be removed in favor
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * of a dedicated execution mode in EM. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync//#define IEM_VERIFICATION_MODE_NO_REM
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
178b942cb42a13f3bca0f99a1bca1311ae190ffdvboxsync * due to GCC lacking knowledge about the value range of a switch. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * occasion.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync } while (0)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * occasion using the supplied logger statement.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param a_LoggerArgs What to log on failure.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /*LogFunc(a_LoggerArgs);*/ \
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync } while (0)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Call an opcode decoder function.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * We're using macros for this so that adding and removing parameters can be
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * done as we please. See FNIEMOP_DEF.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Call a common opcode decoder function taking one extra argument.
178b942cb42a13f3bca0f99a1bca1311ae190ffdvboxsync * We're using macros for this so that adding and removing parameters can be
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * done as we please. See FNIEMOP_DEF_1.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pIemCpu, a0)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Call a common opcode decoder function taking two extra arguments.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * We're using macros for this so that adding and removing parameters can be
ad27e1d5e48ca41245120c331cc88b50464813cevboxsync * done as we please. See FNIEMOP_DEF_2.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pIemCpu, a0, a1)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Check if we're currently executing in real or virtual 8086 mode.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @returns @c true if it is, @c false if not.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param a_pIemCpu The IEM state of the current CPU.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#define IEM_IS_REAL_OR_V86_MODE(a_pIemCpu) (CPUMIsGuestInRealOrV86ModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Check if we're currently executing in long mode.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @returns @c true if it is, @c false if not.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param a_pIemCpu The IEM state of the current CPU.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#define IEM_IS_LONG_MODE(a_pIemCpu) (CPUMIsGuestInLongModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Check if we're currently executing in real mode.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @returns @c true if it is, @c false if not.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param a_pIemCpu The IEM state of the current CPU.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#define IEM_IS_REAL_MODE(a_pIemCpu) (CPUMIsGuestInRealModeEx((a_pIemCpu)->CTX_SUFF(pCtx)))
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Tests if an AMD CPUID feature (extended) is marked present - ECX.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_ECX(a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, 0, (a_fEcx))
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Tests if an AMD CPUID feature (extended) is marked present - EDX.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#define IEM_IS_AMD_CPUID_FEATURE_PRESENT_EDX(a_fEdx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Tests if at least one of the specified AMD CPUID features (extended) is
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * marked present.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#define IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(a_fEdx, a_fEcx) iemRegIsAmdCpuIdFeaturePresent(pIemCpu, (a_fEdx), (a_fEcx))
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Checks if an Intel CPUID feature is present.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#define IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(a_fEdx) \
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync || iemRegIsIntelCpuIdFeaturePresent(pIemCpu, (a_fEdx), 0) )
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Evaluates to true if we're presenting an Intel CPU to the guest.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#define IEM_IS_GUEST_CPU_INTEL(a_pIemCpu) (true) /** @todo determine this once and store it in the CPU structure */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Evaluates to true if we're presenting an AMD CPU to the guest.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#define IEM_IS_GUEST_CPU_AMD(a_pIemCpu) (false) /** @todo determine this once and store it in the CPU structure */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Check if the address is canonical.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#define IEM_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/*******************************************************************************
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync* Global Variables *
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync*******************************************************************************/
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsyncextern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the ADD instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the ADC instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the SUB instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the SBB instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the OR instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the XOR instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the AND instruction. */
0440d9525af9b3ca795668ff408ac93ea6ca0249vboxsync/** Function table for the CMP instruction.
0440d9525af9b3ca795668ff408ac93ea6ca0249vboxsync * @remarks Making operand order ASSUMPTIONS.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the TEST instruction.
58ab0ad45444e80bdd970ddeb468d0872dbbbb47vboxsync * @remarks Making operand order ASSUMPTIONS.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the BT instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the BTC instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the BTR instruction. */
0440d9525af9b3ca795668ff408ac93ea6ca0249vboxsync/** Function table for the BTS instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the BSF instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the BSR instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the IMUL instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Group 1 /r lookup table. */
178b942cb42a13f3bca0f99a1bca1311ae190ffdvboxsync/** Function table for the INC instruction. */
befced03fd84a13590b8ce8be8c2480e9bc568c6vboxsync/** Function table for the DEC instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the NEG instruction. */
178b942cb42a13f3bca0f99a1bca1311ae190ffdvboxsync/** Function table for the NOT instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the ROL instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the ROR instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the RCL instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the RCR instruction. */
16d723d9d597f4872dd4c2c960af9cbca4ed63bdvboxsync/** Function table for the SHL instruction. */
16d723d9d597f4872dd4c2c960af9cbca4ed63bdvboxsync/** Function table for the SHR instruction. */
79baed6836ae36c5f15b182292387484dcf7a752vboxsync/** Function table for the SAR instruction. */
79baed6836ae36c5f15b182292387484dcf7a752vboxsync/** Function table for the MUL instruction. */
79baed6836ae36c5f15b182292387484dcf7a752vboxsync/** Function table for the IMUL instruction working implicitly on rAX. */
79baed6836ae36c5f15b182292387484dcf7a752vboxsync/** Function table for the DIV instruction. */
79baed6836ae36c5f15b182292387484dcf7a752vboxsync/** Function table for the MUL instruction. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the SHLD instruction */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** Function table for the SHRD instruction */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** What IEM just wrote. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/** How much IEM just wrote. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync/*******************************************************************************
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync* Internal Functions *
79baed6836ae36c5f15b182292387484dcf7a752vboxsync*******************************************************************************/
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PIEMCPU pIemCpu);
79baed6836ae36c5f15b182292387484dcf7a752vboxsync/*static VBOXSTRICTRC iemRaiseSelectorNotPresent(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);*/
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsyncstatic VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel);
69c6eff4fcb6dc027e94a3e7908926c4e8ef5aefvboxsyncstatic VBOXSTRICTRC iemRaiseGeneralProtectionFault(PIEMCPU pIemCpu, uint16_t uErr);
69c6eff4fcb6dc027e94a3e7908926c4e8ef5aefvboxsyncstatic VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PIEMCPU pIemCpu);
69c6eff4fcb6dc027e94a3e7908926c4e8ef5aefvboxsyncstatic VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL uSel);
69c6eff4fcb6dc027e94a3e7908926c4e8ef5aefvboxsyncstatic VBOXSTRICTRC iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
69c6eff4fcb6dc027e94a3e7908926c4e8ef5aefvboxsyncstatic VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PIEMCPU pIemCpu, RTSEL Sel);
69c6eff4fcb6dc027e94a3e7908926c4e8ef5aefvboxsyncstatic VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess);
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemRaiseAlignmentCheckException(PIEMCPU pIemCpu);
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemMemCommitAndUnmap(PIEMCPU pIemCpu, void *pvMem, uint32_t fAccess);
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsyncstatic VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemMemFetchSelDesc(PIEMCPU pIemCpu, PIEMSELDESC pDesc, uint16_t uSel);
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemMemStackPushCommitSpecial(PIEMCPU pIemCpu, void *pvMem, uint64_t uNewRsp);
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemMemMarkSelDescAccessed(PIEMCPU pIemCpu, uint16_t uSel);
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic uint16_t iemSRegFetchU16(PIEMCPU pIemCpu, uint8_t iSegReg);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic PIEMVERIFYEVTREC iemVerifyAllocRecord(PIEMCPU pIemCpu);
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
79baed6836ae36c5f15b182292387484dcf7a752vboxsync * Sets the pass up status.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @returns VINF_SUCCESS.
79baed6836ae36c5f15b182292387484dcf7a752vboxsync * @param pIemCpu The per CPU IEM state of the calling thread.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param rcPassUp The pass up status. Must be informational.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * VINF_SUCCESS is not allowed.
79baed6836ae36c5f15b182292387484dcf7a752vboxsyncstatic int iemSetPassUpStatus(PIEMCPU pIemCpu, VBOXSTRICTRC rcPassUp)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* If both are EM scheduling code, use EM priority rules. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* Override EM scheduling with specific status code. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* Don't override specific status code, first come first served. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Initializes the decoder state.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param pIemCpu The per CPU IEM state.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param fBypassHandlers Whether to bypass access handlers.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsyncDECLINLINE(void) iemInitDecoder(PIEMCPU pIemCpu, bool fBypassHandlers)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
79baed6836ae36c5f15b182292387484dcf7a752vboxsync : pCtx->cs.Attr.n.u1DefBig /** @todo check if this is correct... */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync pIemCpu->enmDefAddrMode = enmMode; /** @todo check if this is correct... */
27152389a84c6dec057fba6fc21241991e079006vboxsync pIemCpu->enmDefOpSize = enmMode; /** @todo check if this is correct... */
27152389a84c6dec057fba6fc21241991e079006vboxsync && PATMIsPatchGCAddr(IEMCPU_TO_VM(pIemCpu), pCtx->eip);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
27152389a84c6dec057fba6fc21241991e079006vboxsync * Prefetch opcodes the first time when starting executing.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param pIemCpu The IEM state.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * @param fBypassHandlers Whether to bypass access handlers.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsyncstatic VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PIEMCPU pIemCpu, bool fBypassHandlers)
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * First translate CS:rIP to a physical address.
2b84e8395b8111251c409df711d4c1ec363d030bvboxsync cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync /* Allow interpretation of patch manager code blocks since they can for
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync instance throw #PFs for perfectly good reasons. */
2b84e8395b8111251c409df711d4c1ec363d030bvboxsync uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
459b38316f81309fe908aa10e8abee332a6fdfd3vboxsync memcpy(pIemCpu->abOpcode, (void const *)(uintptr_t)GCPtrPC, cbToTryRead);
2b84e8395b8111251c409df711d4c1ec363d030bvboxsync int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPC, &fFlags, &GCPhys);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
2b84e8395b8111251c409df711d4c1ec363d030bvboxsync Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync return iemRaisePageFault(pIemCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2b84e8395b8111251c409df711d4c1ec363d030bvboxsync /** @todo Check reserved bits and such stuff. PGM is better at doing
2b84e8395b8111251c409df711d4c1ec363d030bvboxsync * that, so do it when implementing the guest virtual address
2b84e8395b8111251c409df711d4c1ec363d030bvboxsync * TLB... */
459b38316f81309fe908aa10e8abee332a6fdfd3vboxsync * Optimistic optimization: Use unconsumed opcode bytes from the previous
2b84e8395b8111251c409df711d4c1ec363d030bvboxsync * instruction.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync /** @todo optimize this differently by not using PGMPhysRead. */
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync RTGCPHYS const offPrevOpcodes = GCPhys - pIemCpu->GCPhysOpcodes;
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pIemCpu->abOpcode))
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync memmove(&pIemCpu->abOpcode[0], &pIemCpu->abOpcode[offPrevOpcodes], cbNew);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * Read the bytes at this address.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync /** @todo PATM: Read original, unpatched bytes? EMAll.cpp doesn't seem to be
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * doing that. */
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, pIemCpu->abOpcode, cbToTryRead);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pIemCpu->abOpcode, GCPhys, cbToTryRead);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync /** @todo status code handling */
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * exception if it fails.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * @returns Strict VBox status code.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * @param pIemCpu The IEM state.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * @param cbMin Where to return the opcode byte.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsyncstatic VBOXSTRICTRC iemOpcodeFetchMoreBytes(PIEMCPU pIemCpu, size_t cbMin)
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * First translate CS:rIP to a physical address.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync uint8_t cbLeft = pIemCpu->cbOpcode - pIemCpu->offOpcode; Assert(cbLeft < cbMin);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync cbToTryRead = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pIemCpu->enmCpuMode == IEMMODE_32BIT);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync int rc = PGMGstGetPage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrNext, &fFlags, &GCPhys);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
27152389a84c6dec057fba6fc21241991e079006vboxsync return iemRaisePageFault(pIemCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pIemCpu->cbOpcode));
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync /** @todo Check reserved bits and such stuff. PGM is better at doing
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * that, so do it when implementing the guest virtual address
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * TLB... */
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * Read the bytes at this address.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync if (cbToTryRead > sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode)
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync cbToTryRead = sizeof(pIemCpu->abOpcode) - pIemCpu->cbOpcode;
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync rc = PGMPhysRead(IEMCPU_TO_VM(pIemCpu), GCPhys, &pIemCpu->abOpcode[pIemCpu->cbOpcode], cbToTryRead);
2b84e8395b8111251c409df711d4c1ec363d030bvboxsync rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), &pIemCpu->abOpcode[pIemCpu->cbOpcode], GCPhys, cbToTryRead);
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync /** @todo status code handling */
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync Log5(("%.*Rhxs\n", pIemCpu->cbOpcode, pIemCpu->abOpcode));
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
459b38316f81309fe908aa10e8abee332a6fdfd3vboxsync * @returns Strict VBox status code.
2b84e8395b8111251c409df711d4c1ec363d030bvboxsync * @param pIemCpu The IEM state.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * @param pb Where to return the opcode byte.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PIEMCPU pIemCpu, uint8_t *pb)
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 1);
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next opcode byte.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu8 Where to return the opcode byte.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PIEMCPU pIemCpu, uint8_t *pu8)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Fetches the next opcode byte, returns automatically on failure.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param a_pu8 Where to return the opcode byte.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @remark Implicitly references pIemCpu.
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pIemCpu, (a_pu8)); \
27152389a84c6dec057fba6fc21241991e079006vboxsync } while (0)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Fetches the next signed byte from the opcode stream.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * @param pIemCpu The IEM state.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * @param pi8 Where to return the signed byte.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PIEMCPU pIemCpu, int8_t *pi8)
27152389a84c6dec057fba6fc21241991e079006vboxsync return iemOpcodeGetNextU8(pIemCpu, (uint8_t *)pi8);
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next signed byte from the opcode stream, returning automatically
27152389a84c6dec057fba6fc21241991e079006vboxsync * on failure.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pi8 Where to return the signed byte.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @remark Implicitly references pIemCpu.
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pIemCpu, (a_pi8)); \
27152389a84c6dec057fba6fc21241991e079006vboxsync } while (0)
27152389a84c6dec057fba6fc21241991e079006vboxsync * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu16 Where to return the opcode dword.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next signed byte from the opcode stream, extending it to
27152389a84c6dec057fba6fc21241991e079006vboxsync * unsigned 16-bit.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param pu16 Where to return the unsigned word.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PIEMCPU pIemCpu, uint16_t *pu16)
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next signed byte from the opcode stream and sign-extending it to
27152389a84c6dec057fba6fc21241991e079006vboxsync * a word, returning automatically on failure.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu16 Where to return the word.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @remark Implicitly references pIemCpu.
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pIemCpu, (a_pu16)); \
27152389a84c6dec057fba6fc21241991e079006vboxsync } while (0)
27152389a84c6dec057fba6fc21241991e079006vboxsync * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu32 Where to return the opcode dword.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next signed byte from the opcode stream, extending it to
27152389a84c6dec057fba6fc21241991e079006vboxsync * unsigned 32-bit.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu32 Where to return the unsigned dword.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PIEMCPU pIemCpu, uint32_t *pu32)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Fetches the next signed byte from the opcode stream and sign-extending it to
27152389a84c6dec057fba6fc21241991e079006vboxsync * a word, returning automatically on failure.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param pu32 Where to return the word.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @remark Implicitly references pIemCpu.
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pIemCpu, (a_pu32)); \
27152389a84c6dec057fba6fc21241991e079006vboxsync } while (0)
27152389a84c6dec057fba6fc21241991e079006vboxsync * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu64 Where to return the opcode qword.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pIemCpu, &u8);
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next signed byte from the opcode stream, extending it to
27152389a84c6dec057fba6fc21241991e079006vboxsync * unsigned 64-bit.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu64 Where to return the unsigned qword.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Fetches the next signed byte from the opcode stream and sign-extending it to
27152389a84c6dec057fba6fc21241991e079006vboxsync * a word, returning automatically on failure.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu64 Where to return the word.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @remark Implicitly references pIemCpu.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pIemCpu, (a_pu64)); \
27152389a84c6dec057fba6fc21241991e079006vboxsync } while (0)
27152389a84c6dec057fba6fc21241991e079006vboxsync * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu16 Where to return the opcode word.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PIEMCPU pIemCpu, uint16_t *pu16)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
27152389a84c6dec057fba6fc21241991e079006vboxsync *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next opcode word.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu16 Where to return the opcode word.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PIEMCPU pIemCpu, uint16_t *pu16)
27152389a84c6dec057fba6fc21241991e079006vboxsync if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
79baed6836ae36c5f15b182292387484dcf7a752vboxsync *pu16 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next opcode word, returns automatically on failure.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param a_pu16 Where to return the opcode word.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @remark Implicitly references pIemCpu.
79baed6836ae36c5f15b182292387484dcf7a752vboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pIemCpu, (a_pu16)); \
27152389a84c6dec057fba6fc21241991e079006vboxsync } while (0)
27152389a84c6dec057fba6fc21241991e079006vboxsync * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu32 Where to return the opcode double word.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
27152389a84c6dec057fba6fc21241991e079006vboxsync *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Fetches the next opcode word, zero extending it to a double word.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
895d68f6e6c15446ca98b8f6ec96ce19d7ea356fvboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu32 Where to return the opcode double word.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PIEMCPU pIemCpu, uint32_t *pu32)
27152389a84c6dec057fba6fc21241991e079006vboxsync if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync return iemOpcodeGetNextU16ZxU32Slow(pIemCpu, pu32);
27152389a84c6dec057fba6fc21241991e079006vboxsync *pu32 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Fetches the next opcode word and zero extends it to a double word, returns
27152389a84c6dec057fba6fc21241991e079006vboxsync * automatically on failure.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param a_pu32 Where to return the opcode double word.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @remark Implicitly references pIemCpu.
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pIemCpu, (a_pu32)); \
27152389a84c6dec057fba6fc21241991e079006vboxsync } while (0)
27152389a84c6dec057fba6fc21241991e079006vboxsync * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param pu64 Where to return the opcode quad word.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 2);
27152389a84c6dec057fba6fc21241991e079006vboxsync *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Fetches the next opcode word, zero extending it to a quad word.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu64 Where to return the opcode quad word.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync if (RT_UNLIKELY(offOpcode + 2 > pIemCpu->cbOpcode))
27152389a84c6dec057fba6fc21241991e079006vboxsync return iemOpcodeGetNextU16ZxU64Slow(pIemCpu, pu64);
27152389a84c6dec057fba6fc21241991e079006vboxsync *pu64 = RT_MAKE_U16(pIemCpu->abOpcode[offOpcode], pIemCpu->abOpcode[offOpcode + 1]);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Fetches the next opcode word and zero extends it to a quad word, returns
27152389a84c6dec057fba6fc21241991e079006vboxsync * automatically on failure.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param a_pu64 Where to return the opcode quad word.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @remark Implicitly references pIemCpu.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pIemCpu, (a_pu64)); \
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync } while (0)
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next signed word from the opcode stream.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pi16 Where to return the signed word.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PIEMCPU pIemCpu, int16_t *pi16)
27152389a84c6dec057fba6fc21241991e079006vboxsync return iemOpcodeGetNextU16(pIemCpu, (uint16_t *)pi16);
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next signed word from the opcode stream, returning automatically
895d68f6e6c15446ca98b8f6ec96ce19d7ea356fvboxsync * on failure.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pi16 Where to return the signed word.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @remark Implicitly references pIemCpu.
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pIemCpu, (a_pi16)); \
27152389a84c6dec057fba6fc21241991e079006vboxsync } while (0)
27152389a84c6dec057fba6fc21241991e079006vboxsync * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu32 Where to return the opcode dword.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PIEMCPU pIemCpu, uint32_t *pu32)
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
27152389a84c6dec057fba6fc21241991e079006vboxsync *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next opcode dword.
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu32 Where to return the opcode double word.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PIEMCPU pIemCpu, uint32_t *pu32)
27152389a84c6dec057fba6fc21241991e079006vboxsync if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
27152389a84c6dec057fba6fc21241991e079006vboxsync *pu32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
895d68f6e6c15446ca98b8f6ec96ce19d7ea356fvboxsync * Fetches the next opcode dword, returns automatically on failure.
ce9015004a47e95eb75e047ba42f2d3200d2d222vboxsync * @param a_pu32 Where to return the opcode dword.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @remark Implicitly references pIemCpu.
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pIemCpu, (a_pu32)); \
27152389a84c6dec057fba6fc21241991e079006vboxsync } while (0)
27152389a84c6dec057fba6fc21241991e079006vboxsync * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu32 Where to return the opcode dword.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
27152389a84c6dec057fba6fc21241991e079006vboxsync *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next opcode dword, zero extending it to a quad word.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu64 Where to return the opcode quad word.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PIEMCPU pIemCpu, uint64_t *pu64)
27152389a84c6dec057fba6fc21241991e079006vboxsync if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
895d68f6e6c15446ca98b8f6ec96ce19d7ea356fvboxsync return iemOpcodeGetNextU32ZxU64Slow(pIemCpu, pu64);
27152389a84c6dec057fba6fc21241991e079006vboxsync *pu64 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next opcode dword and zero extends it to a quad word, returns
27152389a84c6dec057fba6fc21241991e079006vboxsync * automatically on failure.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param a_pu64 Where to return the opcode quad word.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @remark Implicitly references pIemCpu.
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pIemCpu, (a_pu64)); \
27152389a84c6dec057fba6fc21241991e079006vboxsync } while (0)
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next signed double word from the opcode stream.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pi32 Where to return the signed double word.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PIEMCPU pIemCpu, int32_t *pi32)
27152389a84c6dec057fba6fc21241991e079006vboxsync return iemOpcodeGetNextU32(pIemCpu, (uint32_t *)pi32);
5558d00238c7e019ef8c0803358fae6edeba394evboxsync * Fetches the next signed double word from the opcode stream, returning
5558d00238c7e019ef8c0803358fae6edeba394evboxsync * automatically on failure.
5558d00238c7e019ef8c0803358fae6edeba394evboxsync * @param pi32 Where to return the signed double word.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @remark Implicitly references pIemCpu.
ce9015004a47e95eb75e047ba42f2d3200d2d222vboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pIemCpu, (a_pi32)); \
ce9015004a47e95eb75e047ba42f2d3200d2d222vboxsync } while (0)
27152389a84c6dec057fba6fc21241991e079006vboxsync * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu64 Where to return the opcode qword.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 4);
27152389a84c6dec057fba6fc21241991e079006vboxsync *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next opcode dword, sign extending it into a quad word.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
895d68f6e6c15446ca98b8f6ec96ce19d7ea356fvboxsync * @param pIemCpu The IEM state.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync * @param pu64 Where to return the opcode quad word.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64)
27152389a84c6dec057fba6fc21241991e079006vboxsync if (RT_UNLIKELY(offOpcode + 4 > pIemCpu->cbOpcode))
27152389a84c6dec057fba6fc21241991e079006vboxsync return iemOpcodeGetNextS32SxU64Slow(pIemCpu, pu64);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync int32_t i32 = RT_MAKE_U32_FROM_U8(pIemCpu->abOpcode[offOpcode],
feef019e08ac3cead66d03feb9094cac75d9bc0dvboxsync * Fetches the next opcode double word and sign extends it to a quad word,
feef019e08ac3cead66d03feb9094cac75d9bc0dvboxsync * returns automatically on failure.
feef019e08ac3cead66d03feb9094cac75d9bc0dvboxsync * @param a_pu64 Where to return the opcode quad word.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @remark Implicitly references pIemCpu.
feef019e08ac3cead66d03feb9094cac75d9bc0dvboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pIemCpu, (a_pu64)); \
27152389a84c6dec057fba6fc21241991e079006vboxsync } while (0)
27152389a84c6dec057fba6fc21241991e079006vboxsync * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu64 Where to return the opcode qword.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECL_NO_INLINE(static, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PIEMCPU pIemCpu, uint64_t *pu64)
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pIemCpu, 8);
27152389a84c6dec057fba6fc21241991e079006vboxsync *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next opcode qword.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns Strict VBox status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM state.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pu64 Where to return the opcode qword.
27152389a84c6dec057fba6fc21241991e079006vboxsyncDECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PIEMCPU pIemCpu, uint64_t *pu64)
27152389a84c6dec057fba6fc21241991e079006vboxsync if (RT_UNLIKELY(offOpcode + 8 > pIemCpu->cbOpcode))
27152389a84c6dec057fba6fc21241991e079006vboxsync *pu64 = RT_MAKE_U64_FROM_U8(pIemCpu->abOpcode[offOpcode],
27152389a84c6dec057fba6fc21241991e079006vboxsync * Fetches the next opcode quad word, returns automatically on failure.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param a_pu64 Where to return the opcode quad word.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @remark Implicitly references pIemCpu.
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pIemCpu, (a_pu64)); \
27152389a84c6dec057fba6fc21241991e079006vboxsync } while (0)
27152389a84c6dec057fba6fc21241991e079006vboxsync/** @name Misc Worker Functions.
27152389a84c6dec057fba6fc21241991e079006vboxsync * Validates a new SS segment.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @returns VBox strict status code.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pIemCpu The IEM per CPU instance data.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pCtx The CPU context.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param NewSS The new SS selctor.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param uCpl The CPL to load the stack for.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param pDesc Where to return the descriptor.
3fa7a7e633f46a212052b510cdb8cee41f279a67vboxsyncstatic VBOXSTRICTRC iemMiscValidateNewSS(PIEMCPU pIemCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
27152389a84c6dec057fba6fc21241991e079006vboxsync /* Null selectors are not allowed (we're not called for dispatching
27152389a84c6dec057fba6fc21241991e079006vboxsync interrupts with SS=0 in long mode). */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("iemMiscValidateNewSSandRsp: #x - null selector -> #GP(0)\n", NewSS));
27152389a84c6dec057fba6fc21241991e079006vboxsync * Read the descriptor.
27152389a84c6dec057fba6fc21241991e079006vboxsync VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, pDesc, NewSS);
27152389a84c6dec057fba6fc21241991e079006vboxsync * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
27152389a84c6dec057fba6fc21241991e079006vboxsync Log(("iemMiscValidateNewSSandRsp: %#x - system selector -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
27152389a84c6dec057fba6fc21241991e079006vboxsync return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
27152389a84c6dec057fba6fc21241991e079006vboxsync if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
27152389a84c6dec057fba6fc21241991e079006vboxsync || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
27152389a84c6dec057fba6fc21241991e079006vboxsync Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
27152389a84c6dec057fba6fc21241991e079006vboxsync return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
27152389a84c6dec057fba6fc21241991e079006vboxsync /** @todo testcase: check if the TSS.ssX RPL is checked. */
27152389a84c6dec057fba6fc21241991e079006vboxsync Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #GP\n", NewSS, uCpl));
27152389a84c6dec057fba6fc21241991e079006vboxsync return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
27152389a84c6dec057fba6fc21241991e079006vboxsync Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #GP\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
27152389a84c6dec057fba6fc21241991e079006vboxsync return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
27152389a84c6dec057fba6fc21241991e079006vboxsync /* Is it there? */
27152389a84c6dec057fba6fc21241991e079006vboxsync /** @todo testcase: Is this checked before the canonical / limit check below? */
5558d00238c7e019ef8c0803358fae6edeba394evboxsync Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
5558d00238c7e019ef8c0803358fae6edeba394evboxsync return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewSS);
27152389a84c6dec057fba6fc21241991e079006vboxsync * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param a_pIemCpu The IEM per CPU data.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @param a_pCtx The CPU context.
27152389a84c6dec057fba6fc21241991e079006vboxsync * Updates the EFLAGS in the correct manner wrt. PATM.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param a_pIemCpu The IEM per CPU data.
27152389a84c6dec057fba6fc21241991e079006vboxsync * @param a_pCtx The CPU context.
ccbb9941eeafd9ec56b8ed048910198240e6caaavboxsync# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync CPUMRawSetEFlags(IEMCPU_TO_VMCPU(a_pIemCpu), a_fEfl); \
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync } while (0)
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync# define IEMMISC_SET_EFL(a_pIemCpu, a_pCtx, a_fEfl) \
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync } while (0)
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync/** @name Raising Exceptions.
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync/** CPU exception. */
27152389a84c6dec057fba6fc21241991e079006vboxsync/** External interrupt (from PIC, APIC, whatever). */
27152389a84c6dec057fba6fc21241991e079006vboxsync/** Software interrupt (int or into, not bound).
27152389a84c6dec057fba6fc21241991e079006vboxsync * Returns to the following instruction */
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync/** Takes an error code. */
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync/** Takes a CR2. */
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync/** Generated by the breakpoint instruction. */
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync * Loads the specified stack far pointer from the TSS.
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync * @returns VBox strict status code.
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync * @param pIemCpu The IEM per CPU instance data.
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync * @param pCtx The CPU context.
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync * @param uCpl The CPL to load the stack for.
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync * @param pSelSS Where to return the new stack segment.
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsync * @param puEsp Where to return the new stack pointer.
45afcc40bbf641d3dc10d85acbdf48b67b46aed4vboxsyncstatic VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl,
27152389a84c6dec057fba6fc21241991e079006vboxsync * 16-bit TSS (X86TSS16).
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
27152389a84c6dec057fba6fc21241991e079006vboxsync Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync rcStrict = iemMemFetchSysU32(pIemCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
178b942cb42a13f3bca0f99a1bca1311ae190ffdvboxsync * 32-bit TSS (X86TSS32).
27152389a84c6dec057fba6fc21241991e079006vboxsync case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync rcStrict = iemMemFetchSysU64(pIemCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
333691af03965f6009137429427d69047046e5cbvboxsync * Loads the specified stack pointer from the 64-bit TSS.
333691af03965f6009137429427d69047046e5cbvboxsync * @returns VBox strict status code.
333691af03965f6009137429427d69047046e5cbvboxsync * @param pIemCpu The IEM per CPU instance data.
333691af03965f6009137429427d69047046e5cbvboxsync * @param pCtx The CPU context.
333691af03965f6009137429427d69047046e5cbvboxsync * @param uCpl The CPL to load the stack for.
333691af03965f6009137429427d69047046e5cbvboxsync * @param uIst The interrupt stack table index, 0 if to use uCpl.
333691af03965f6009137429427d69047046e5cbvboxsync * @param puRsp Where to return the new stack pointer.
d16692c6ba83a5fa6bb4dd44652002a64e2bf70avboxsyncstatic VBOXSTRICTRC iemRaiseLoadStackFromTss64(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst,
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_INTERNAL_ERROR_2);
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync return iemMemFetchSysU64(pIemCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync * Adjust the CPU state according to the exception being raised.
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync * @param pCtx The CPU context.
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync * @param u8Vector The exception that has been raised.
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsyncDECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync /** @todo Read the AMD and Intel exception reference... */
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync * Implements exceptions and interrupts for real mode.
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync * @returns VBox strict status code.
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync * @param pIemCpu The IEM per CPU instance data.
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync * @param pCtx The CPU context.
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync * @param cbInstr The number of bytes to offset rIP by in the return
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync * @param u8Vector The interrupt / exception vector number.
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync * @param fFlags The flags.
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
359416647a711739d1b14addbf399178949a1a60vboxsync AssertReturn(pIemCpu->enmCpuMode == IEMMODE_16BIT, VERR_INTERNAL_ERROR_3);
359416647a711739d1b14addbf399178949a1a60vboxsync * Read the IDT entry.
359416647a711739d1b14addbf399178949a1a60vboxsync Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
359416647a711739d1b14addbf399178949a1a60vboxsync return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
d16692c6ba83a5fa6bb4dd44652002a64e2bf70avboxsync VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pIemCpu, (uint32_t *)&Idte, UINT8_MAX,
6370aaf3748669b056c6ede9b64ff5f837a17820vboxsync * Push the stack frame.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync rcStrict = iemMemStackPushBeginSpecial(pIemCpu, 6, (void **)&pu16Frame, &uNewRsp);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Load the vector address into cs:ip and make exception specific state
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * adjustments.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /** @todo do we actually do this in real mode? */
ad290511521ce8388a9926b165241ecf83c330a7vboxsync return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
7ea49b4765b66fc68d2e6c1cb2a647b53a4aea24vboxsync * Implements exceptions and interrupts for protected mode.
ad290511521ce8388a9926b165241ecf83c330a7vboxsync * @returns VBox strict status code.
7ea49b4765b66fc68d2e6c1cb2a647b53a4aea24vboxsync * @param pIemCpu The IEM per CPU instance data.
ad290511521ce8388a9926b165241ecf83c330a7vboxsync * @param pCtx The CPU context.
7ea49b4765b66fc68d2e6c1cb2a647b53a4aea24vboxsync * @param cbInstr The number of bytes to offset rIP by in the return
7ea49b4765b66fc68d2e6c1cb2a647b53a4aea24vboxsync * @param u8Vector The interrupt / exception vector number.
7ea49b4765b66fc68d2e6c1cb2a647b53a4aea24vboxsync * @param fFlags The flags.
ad290511521ce8388a9926b165241ecf83c330a7vboxsync * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
7ea49b4765b66fc68d2e6c1cb2a647b53a4aea24vboxsync * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
e50527359fb17a5127ef6291e691c8f09d726157vboxsync * Read the IDT entry.
dee9595a04fb1bd1908a48bd602fa1ed0b6967f9vboxsync Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
dee9595a04fb1bd1908a48bd602fa1ed0b6967f9vboxsync return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4d6b317d67ba577744e53cdfa0c7472d4223db5avboxsync VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.u, UINT8_MAX,
4d6b317d67ba577744e53cdfa0c7472d4223db5avboxsync Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
e68a2ad82506ee1197ef0a520d7ffebca1e33a0cvboxsync u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4d6b317d67ba577744e53cdfa0c7472d4223db5avboxsync Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
dee9595a04fb1bd1908a48bd602fa1ed0b6967f9vboxsync * Check the descriptor type, DPL and such.
dee9595a04fb1bd1908a48bd602fa1ed0b6967f9vboxsync * ASSUMES this is done in the same order as described for call-gate calls.
e68a2ad82506ee1197ef0a520d7ffebca1e33a0cvboxsync Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
e68a2ad82506ee1197ef0a520d7ffebca1e33a0cvboxsync return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
e68a2ad82506ee1197ef0a520d7ffebca1e33a0cvboxsync uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /** @todo check what actually happens when the type is wrong...
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * esp. call gates. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /** @todo task gates. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* Check DPL against CPL if applicable. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* Is it there? */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* A null CS is bad. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
47b6f2e161ff99e6ab1b4f6090798276954b4a78vboxsync /* Fetch the descriptor for the new CS. */
47b6f2e161ff99e6ab1b4f6090798276954b4a78vboxsync rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
648e22edef0d6e2e0573a96fc77b09003675f6a4vboxsync Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* Must be a code segment. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
11d3005e2935c925665896fa26fde09b3e656d70vboxsync if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync /* Don't allow lowering the privilege level. */
bdbed0b8e7fb553d01417fdc976a76f3b287dbe2vboxsync /** @todo Does the lowering of privileges apply to software interrupts
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * only? This has bearings on the more-privileged or
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * same-privilege stack behavior further down. A testcase would
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * be nice. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* Make sure the selector is present. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* Check the new EIP against the new CS limit. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * If the privilege level changes, we need to get a new stack from the TSS.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * This in turns means validating the new SS and ESP...
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync rcStrict = iemRaiseLoadStackFromTss32Or16(pIemCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
47b6f2e161ff99e6ab1b4f6090798276954b4a78vboxsync rcStrict = iemMiscValidateNewSS(pIemCpu, pCtx, NewSS, uNewCpl, &DescSS);
47b6f2e161ff99e6ab1b4f6090798276954b4a78vboxsync /* Check that there is sufficient space for the stack frame. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Expand down segments\n")); /** @todo Implement expand down segment support. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 24 : 20;
47b6f2e161ff99e6ab1b4f6090798276954b4a78vboxsync Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
47b6f2e161ff99e6ab1b4f6090798276954b4a78vboxsync return iemRaiseSelectorBoundsBySelector(pIemCpu, NewSS);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Start making changes.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* Create the stack frame. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
bd88a03fe4f970611c171f081be318fcd74e85e8vboxsync uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
bd88a03fe4f970611c171f081be318fcd74e85e8vboxsync uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
bd88a03fe4f970611c171f081be318fcd74e85e8vboxsync uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
bd88a03fe4f970611c171f081be318fcd74e85e8vboxsync rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
bd88a03fe4f970611c171f081be318fcd74e85e8vboxsync /* Mark the selectors 'accessed' (hope this is the correct time). */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /** @todo testcase: excatly _when_ are the accessed bits set - before or
58461e707998a927c19da46b98748ee2b79f4190vboxsync * after pushing the stack frame? (Write protect the gdt + stack to
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * find out.) */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
e6b70a63c722fdf132fdf96257aa00a1e0e37e3dvboxsync rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewSS);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Start comitting the register changes (joins with the DPL=CPL branch).
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync pCtx->rsp = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Same privilege, no stack change and smaller stack frame.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync uint8_t const cbStackFrame = fFlags & IEM_XCPT_FLAGS_ERR ? 16 : 12;
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync rcStrict = iemMemStackPushBeginSpecial(pIemCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pIemCpu->uCpl;
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync rcStrict = iemMemCommitAndUnmap(pIemCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* Mark the CS selector as 'accessed'. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync rcStrict = iemMemMarkSelDescAccessed(pIemCpu, NewCS);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Start committing the register changes (joins with the other branch).
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* ... register committing continues. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
85e5ab5adbba74b522731762dd05ca88cb529140vboxsync * Implements exceptions and interrupts for V8086 mode.
85e5ab5adbba74b522731762dd05ca88cb529140vboxsync * @returns VBox strict status code.
85e5ab5adbba74b522731762dd05ca88cb529140vboxsync * @param pIemCpu The IEM per CPU instance data.
85e5ab5adbba74b522731762dd05ca88cb529140vboxsync * @param pCtx The CPU context.
85e5ab5adbba74b522731762dd05ca88cb529140vboxsync * @param cbInstr The number of bytes to offset rIP by in the return
85e5ab5adbba74b522731762dd05ca88cb529140vboxsync * @param u8Vector The interrupt / exception vector number.
85e5ab5adbba74b522731762dd05ca88cb529140vboxsync * @param fFlags The flags.
85e5ab5adbba74b522731762dd05ca88cb529140vboxsync * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
e637cb22e348f5665d5473dae55ed785aa7b6e9avboxsync * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
e637cb22e348f5665d5473dae55ed785aa7b6e9avboxsync NOREF(pIemCpu); NOREF(pCtx); NOREF(cbInstr); NOREF(u8Vector); NOREF(fFlags); NOREF(uErr); NOREF(uCr2);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /** @todo implement me. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("V8086 exception / interrupt dispatching\n"));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Implements exceptions and interrupts for long mode.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * @returns VBox strict status code.
388f65e835e6a1dde31e13590eb32681819a2c23vboxsync * @param pIemCpu The IEM per CPU instance data.
388f65e835e6a1dde31e13590eb32681819a2c23vboxsync * @param pCtx The CPU context.
388f65e835e6a1dde31e13590eb32681819a2c23vboxsync * @param cbInstr The number of bytes to offset rIP by in the return
388f65e835e6a1dde31e13590eb32681819a2c23vboxsync * @param u8Vector The interrupt / exception vector number.
388f65e835e6a1dde31e13590eb32681819a2c23vboxsync * @param fFlags The flags.
388f65e835e6a1dde31e13590eb32681819a2c23vboxsync * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
388f65e835e6a1dde31e13590eb32681819a2c23vboxsync * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Read the IDT entry.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
ce9015004a47e95eb75e047ba42f2d3200d2d222vboxsync return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
ce9015004a47e95eb75e047ba42f2d3200d2d222vboxsync VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
64e889d83afc98d310a0c40fb458d23733b73b03vboxsync rcStrict = iemMemFetchSysU64(pIemCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
64e889d83afc98d310a0c40fb458d23733b73b03vboxsync Log(("iemiemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
64e889d83afc98d310a0c40fb458d23733b73b03vboxsync u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
ce9015004a47e95eb75e047ba42f2d3200d2d222vboxsync Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * Check the descriptor type, DPL and such.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * ASSUMES this is done in the same order as described for call-gate calls.
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
79baed6836ae36c5f15b182292387484dcf7a752vboxsync uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* Check DPL against CPL if applicable. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pIemCpu->uCpl, Idte.Gate.u2Dpl));
388f65e835e6a1dde31e13590eb32681819a2c23vboxsync return iemRaiseGeneralProtectionFault(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
388f65e835e6a1dde31e13590eb32681819a2c23vboxsync /* Is it there? */
388f65e835e6a1dde31e13590eb32681819a2c23vboxsync Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
388f65e835e6a1dde31e13590eb32681819a2c23vboxsync return iemRaiseSelectorNotPresentWithErr(pIemCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* A null CS is bad. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* Fetch the descriptor for the new CS. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, NewCS);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* Must be a 64-bit code segment. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /* Don't allow lowering the privilege level. For non-conforming CS
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync selectors, the CS.DPL sets the privilege level the trap/interrupt
388f65e835e6a1dde31e13590eb32681819a2c23vboxsync handler runs at. For conforming CS selectors, the CPL remains
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync unchanged, but the CS.DPL must be <= CPL. */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync * when CPU in Ring-0. Result \#GP? */
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2ca380caf80f0dacc65f8c996077e827318f1c69vboxsync return iemRaiseGeneralProtectionFault(pIemCpu, NewCS & X86_SEL_MASK_OFF_RPL);
79baed6836ae36c5f15b182292387484dcf7a752vboxsync /* Make sure the selector is present. */
79baed6836ae36c5f15b182292387484dcf7a752vboxsync Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
79baed6836ae36c5f15b182292387484dcf7a752vboxsync return iemRaiseSelectorNotPresentBySelector(pIemCpu, NewCS);
79baed6836ae36c5f15b182292387484dcf7a752vboxsync /* Check that the new RIP is canonical. */
Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
return rcStrict;
return rcStrict;
return rcStrict;
return rcStrict;
Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pIemCpu->uCurXcpt, pIemCpu->cXcptRecursions + 1, fPrevXcpt));
#ifdef DEBUG_bird
AssertFailed();
if (LogIs3Enabled())
"gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pIemCpu->uCpl));
return rcStrict;
#ifdef SOME_UNUSED_FUNCTION
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PIEMCPU pIemCpu, uint16_t uErr)
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PIEMCPU pIemCpu, uint32_t iSegReg)
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PIEMCPU pIemCpu, uint16_t uSel)
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PIEMCPU pIemCpu, RTSEL Sel)
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorBounds(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PIEMCPU pIemCpu, uint32_t iSegReg, uint32_t fAccess)
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
DECL_NO_INLINE(static, VBOXSTRICTRC) iemRaisePageFault(PIEMCPU pIemCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
switch (rc)
case VERR_PAGE_NOT_PRESENT:
uErr = 0;
case VERR_ACCESS_DENIED:
return iemRaiseXcptOrInt(pIemCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
case IEM_OP_PRF_SIZE_OP:
case IEM_OP_PRF_SIZE_REX_W:
AssertFailed();
"gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
# define IEMOP_BITCH_ABOUT_STUB() \
RTAssertPanic(); \
return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
typedef int ignore_semicolon
return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
typedef int ignore_semicolon
return IEMOP_RAISE_INVALID_OPCODE(); \
typedef int ignore_semicolon
return IEMOP_RAISE_INVALID_OPCODE(); \
typedef int ignore_semicolon
switch (iSegReg)
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
return pSReg;
switch (iSegReg)
switch (iSegReg)
switch (iReg)
pu8Reg++;
return pu8Reg;
return *pbSrc;
#ifdef RT_ARCH_AMD64
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
return VINF_SUCCESS;
return VINF_SUCCESS;
return VINF_SUCCESS;
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
return VINF_SUCCESS;
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
default: AssertFailed();
return GCPtrTop;
return GCPtrTop;
return GCPtrTop;
return GCPtrTop;
#ifdef IN_RING3
switch (iEffSeg)
static void iemFpuStoreResultOnly(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, PCPUMCTX pCtx)
static void iemFpuPushResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
static void iemFpuStoreResultWithMemOp(PIEMCPU pIemCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
static void iemFpuUpdateFSWWithMemOp(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
static void iemFpuUpdateFSWWithMemOpThenPop(PIEMCPU pIemCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
DECL_NO_INLINE(static, void)
DECL_NO_INLINE(static, void)
iemFpuStackUnderflowWithMemOpThenPop(PIEMCPU pIemCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
DECL_NO_INLINE(static, void)
DECL_NO_INLINE(static, void)
DECL_NO_INLINE(static, void)
return VINF_SUCCESS;
return VERR_NOT_FOUND;
return VINF_SUCCESS;
return VERR_NOT_FOUND;
return VINF_SUCCESS;
return VERR_NOT_FOUND;
static int iemFpu2StRegsNotEmptyRefFirst(PIEMCPU pIemCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
return VINF_SUCCESS;
return VERR_NOT_FOUND;
return u16Ftw;
return u8Ftw;
if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
|| (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
static VBOXSTRICTRC iemMemSegCheckWriteAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
return VINF_SUCCESS;
static VBOXSTRICTRC iemMemSegCheckReadAccessEx(PIEMCPU pIemCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
return VINF_SUCCESS;
return VINF_SUCCESS;
case IEMMODE_16BIT:
case IEMMODE_32BIT:
return VINF_SUCCESS;
case IEMMODE_64BIT:
return VINF_SUCCESS;
static VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PIEMCPU pIemCpu, RTGCPTR GCPtrMem, uint32_t fAccess,
Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
return iemRaisePageFault(pIemCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
return VINF_SUCCESS;
static int iemMemPageMap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
#ifdef IEM_VERIFICATION_MODE_FULL
return VERR_PGM_PHYS_TLB_CATCH_ALL;
#ifdef IEM_LOG_MEMORY_WRITES
return VERR_PGM_PHYS_TLB_CATCH_ALL;
return VERR_PGM_PHYS_TLB_CATCH_ALL;
pLock);
return rc;
DECLINLINE(void) iemMemPageUnmap(PIEMCPU pIemCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
return VERR_NOT_FOUND;
int rc;
#ifndef IEM_VERIFICATION_MODE_MINIMAL
cbFirst);
cbSecond);
cbFirst);
cbSecond);
if (pEvtRec)
memcpy(pEvtRec->u.RamWrite.ab, &pIemCpu->aBounceBuffers[iMemMap].ab[0], pIemCpu->aMemBbMappings[iMemMap].cbFirst);
if (pEvtRec)
RT_MAX(RT_MIN(pIemCpu->aMemBbMappings[iMemMap].cbFirst, 64), 1), &pIemCpu->aBounceBuffers[iMemMap].ab[0]));
size_t cbWrote = pIemCpu->aMemBbMappings[iMemMap].cbFirst + pIemCpu->aMemBbMappings[iMemMap].cbSecond;
memcpy(g_abIemWrote, &pIemCpu->aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
return rc;
VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst, fAccess, &GCPhysFirst);
return rcStrict;
rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrFirst + (cbMem - 1), fAccess, &GCPhysSecond);
return rcStrict;
int rc;
return rc;
return rc;
Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
return rc;
rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
return rc;
if (pEvtRec)
if (pEvtRec)
#ifdef VBOX_STRICT
#ifdef VBOX_STRICT
return VINF_SUCCESS;
static VBOXSTRICTRC iemMemBounceBufferMapPhys(PIEMCPU pIemCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
return rcMap;
int rc;
return rc;
if (pEvtRec)
#ifdef VBOX_STRICT
#ifdef VBOX_STRICT
return VINF_SUCCESS;
static VBOXSTRICTRC iemMemMap(PIEMCPU pIemCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
return rcStrict;
return rcStrict;
void *pvMem;
rcStrict = iemMemPageMap(pIemCpu, GCPhysFirst, fAccess, &pvMem, &pIemCpu->aMemMappingLocks[iMemMap].Lock);
return VINF_SUCCESS;
return VINF_SUCCESS;
static VBOXSTRICTRC iemMemFetchDataU8(PIEMCPU pIemCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
return rc;
static VBOXSTRICTRC iemMemFetchDataU16(PIEMCPU pIemCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
return rc;
static VBOXSTRICTRC iemMemFetchDataU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
return rc;
#ifdef SOME_UNUSED_FUNCTION
static VBOXSTRICTRC iemMemFetchDataS32SxU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
*pu64Dst = 0;
return rc;
static VBOXSTRICTRC iemMemFetchDataU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
return rc;
static VBOXSTRICTRC iemMemFetchDataR80(PIEMCPU pIemCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
return rc;
(void **)&pu8Src,
switch (enmOpSize)
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
return rcStrict;
static VBOXSTRICTRC iemMemStoreDataU8(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
return rc;
static VBOXSTRICTRC iemMemStoreDataU16(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
return rc;
static VBOXSTRICTRC iemMemStoreDataU32(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
return rc;
static VBOXSTRICTRC iemMemStoreDataU64(PIEMCPU pIemCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
return rc;
(void **)&pu8Src,
return rcStrict;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rc;
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rc;
VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
return rcStrict;
static VBOXSTRICTRC iemMemStackPushBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
return rcStrict;
static VBOXSTRICTRC iemMemStackPopBeginSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
static VBOXSTRICTRC iemMemStackPopContinueSpecial(PIEMCPU pIemCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
static VBOXSTRICTRC iemMemStackPopCommitSpecial(PIEMCPU pIemCpu, void const *pvMem, uint64_t uNewRsp)
return rcStrict;
static VBOXSTRICTRC iemMemFetchSysU32(PIEMCPU pIemCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
return rc;
static VBOXSTRICTRC iemMemFetchSysU64(PIEMCPU pIemCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
VBOXSTRICTRC rc = iemMemMap(pIemCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
return rc;
VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
return rcStrict;
return rcStrict;
return rcStrict;
#include "IEMAllCImpl.cpp.h"
#define IEM_MC_END() }
#define IEM_MC_PAUSE() do {} while (0)
#define IEM_MC_CONTINUE() do {} while (0)
return rcStrict2; \
#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pIemCpu, a_i16))
#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pIemCpu, a_i32))
#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u16NewIP)))
#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u32NewIP)))
#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pIemCpu), (a_u64NewIP)))
#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pIemCpu)->CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pIemCpu, (a_iGReg))
#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pIemCpu, (a_iSReg))
#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u16Value)
#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) = (a_u64Value)
#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= UINT32_MAX
#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = (uint16_t *)iemGRegRef(pIemCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = (uint32_t *)iemGRegRef(pIemCpu, (a_iGReg))
#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = (uint64_t *)iemGRegRef(pIemCpu, (a_iGReg))
#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u8Value)
#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u16Value)
#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) += (a_u64Value)
#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u8Value)
#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u16Value)
#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) -= (a_u64Value)
#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pIemCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pIemCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pIemCpu, (a_iGReg)); } while (0)
#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pIemCpu, (a_iGReg)); } while (0)
#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u8Value)
#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u16Value)
#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) &= (a_u64Value)
#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *(uint8_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u8Value)
#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *(uint16_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u16Value)
#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) |= (a_u64Value)
#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pIemCpu)->CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
#define IEM_MC_CLEAR_FSW_EX() do { (pIemCpu)->CTX_SUFF(pCtx)->fpu.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pIemCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pIemCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pIemCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
IEM_MC_RETURN_ON_FAILURE(iemMemMap(pIemCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3, a4)
#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1)
#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
#define IEM_MC_FPU_STACK_INC_TOP() \
#define IEM_MC_FPU_STACK_DEC_TOP() \
#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
#define IEM_MC_USED_FPU() \
#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (*(uint64_t *)iemGRegRef(pIemCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
if (iemFpu2StRegsNotEmptyRef(pIemCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
#define IEM_MC_IF_FCW_IM() \
#define IEM_MC_ELSE() } else {
#define IEM_MC_ENDIF() } do {} while (0)
#ifdef DEBUG
Log4(("decode - %04x:%RGv %s%s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", pIemCpu->CTX_SUFF(pCtx)->cs.Sel, pIemCpu->CTX_SUFF(pCtx)->rip, \
pIemCpu->fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pIemCpu->cInstructions))
#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
#define IEMOP_HLP_NO_LOCK_PREFIX() \
return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
#define IEMOP_HLP_NO_64BIT() \
return IEMOP_RAISE_INVALID_OPCODE(); \
#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
#define IEMOP_HLP_64BIT_OP_SIZE() \
#define IEMOP_HLP_DONE_DECODING() \
#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
static VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PIEMCPU pIemCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
#define SET_SS_DEF() \
case 0: u16EffAddr = 0; break;
SET_SS_DEF();
SET_SS_DEF();
return VINF_SUCCESS;
#include "IEMAllInstructions.cpp.h"
int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
while (pEvtRec)
} while (pEvtRec);
return NULL;
if (pEvtRec)
pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(IEMCPU_TO_VM(pIemCpu), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
if (!pEvtRec)
return NULL;
return pEvtRec;
if (!pVCpu)
if (!pEvtRec)
if (!pVCpu)
if (!pEvtRec)
if (!pVCpu)
if (!pEvtRec)
if (!pVCpu)
if (!pEvtRec)
VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrDst, RTGCUINTREG cTransfers, size_t cbValue)
AssertFailed();
VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, RTGCPTR GCPtrSrc, RTGCUINTREG cTransfers, size_t cbValue)
AssertFailed();
static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
if (pEvtRec)
return VINF_SUCCESS;
static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
if (pEvtRec)
return VINF_SUCCESS;
"gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
case IEMVERIFYEVENT_RAM_READ:
case IEMVERIFYEVENT_RAM_WRITE:
static void iemVerifyAssertRecords(PIEMCPU pIemCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
int rc = PGMPhysSimpleReadGCPhys(IEMCPU_TO_VM(pIemCpu), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
unsigned cDiffs = 0;
case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - rem=%04x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - rem=%08x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - rem=%016llx\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); break; \
cDiffs++; \
RTAssertMsg2Weak(" %8s differs - iem=%02x - rem=%02x\n", #a_Field, pDebugCtx->a_Field, pOrgCtx->a_Field); \
cDiffs++; \
pOrgCtx->fpu.MXCSR_MASK = pDebugCtx->fpu.MXCSR_MASK; /* only for the time being - old snapshots here. */
cDiffs++;
RTAssertMsg2Weak(" rflags differs - iem=%08llx rem=%08llx\n", pDebugCtx->rflags.u, pOrgCtx->rflags.u);
/* Klugde #1: REM fetches code and accross the page boundrary and faults on the next page, while we execute
/* Kludge #2: CR2 differs slightly on cross page boundrary faults, we report the last address of the access
while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
if ((pOrgCtx->dr[7] & ~X86_DR7_MB1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_MB1_MASK)) /* REM 'mov drX,greg' bug.*/
if (cDiffs != 0)
bool fEquals;
case IEMVERIFYEVENT_RAM_READ:
case IEMVERIFYEVENT_RAM_WRITE:
fEquals = false;
if (!fEquals)
static VBOXSTRICTRC iemVerifyFakeIOPortRead(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
return VERR_INTERNAL_ERROR;
static VBOXSTRICTRC iemVerifyFakeIOPortWrite(PIEMCPU pIemCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
return VERR_INTERNAL_ERROR;
DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, PIEMCPU pIemCpu, bool fExecuteInhibit)
// AssertMsg(pIemCpu->offOpcode == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", pIemCpu->offOpcode, cbInstr));
if ( fExecuteInhibit
b; IEM_OPCODE_GET_NEXT_U8(&b);
AssertMsg(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
#ifdef IEM_VERIFICATION_MODE_FULL
#if defined(IEM_VERIFICATION_MODE_FULL)
return rcStrict;
#ifdef IN_RC
DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PIEMCPU pIemCpu, PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
return rcStrict;
#ifdef LOG_ENABLED
# ifdef IN_RING3
if (LogIs2Enabled())
szInstr));
if (LogIs3Enabled())
#ifdef IN_RC
return rcStrict;
if (pcbWritten)
#ifdef IN_RC
return rcStrict;
VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
if ( cbOpcodeBytes
#ifdef IN_RC
return rcStrict;
if (pcbWritten)
#ifdef IN_RC
return rcStrict;
VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
if ( cbOpcodeBytes
#ifdef IN_RC
return rcStrict;
#ifdef IEM_VERIFICATION_MODE_FULL
int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
#ifdef LOG_ENABLED
# ifdef IN_RING3
if (LogIs2Enabled())
szInstr));
if (LogIs3Enabled())
#ifdef IN_RC
return rcStrict;
VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2)
switch (enmType)
case TRPM_HARDWARE_INT:
case TRPM_SOFTWARE_INT:
case TRPM_TRAP:
switch (u8TrapNo)
case X86_XCPT_DF:
case X86_XCPT_TS:
case X86_XCPT_NP:
case X86_XCPT_SS:
case X86_XCPT_PF:
case X86_XCPT_AC:
return VERR_NOT_IMPLEMENTED;
return VERR_NOT_IMPLEMENTED;
return rcStrict;