/* IEMAllInstructions.cpp.h, revision 93f5e318bdffb66ddc6001c0b0e7ea1ca046887d */
/* $Id$ */
/** @file
* IEM - Instruction Decoding and Emulation.
*/
/*
* Copyright (C) 2011-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Global Variables *
*******************************************************************************/
/**
* Common worker for instructions like ADD, AND, OR, ++ with a byte
* memory/register as the destination.
*
* @param pImpl Pointer to the instruction implementation (assembly).
*/
{
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
IEM_MC_BEGIN(3, 0);
IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
}
else
{
/*
* We're accessing memory.
* Note! We're putting the eflags on the stack here so we can commit them
* after the memory.
*/
uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
}
return VINF_SUCCESS;
}
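/*
 * Reader's note (not part of the original source): the expression
 *     ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg
 * that recurs below extracts the 3-bit 'reg' field from the ModR/M byte and
 * ORs in the REX.R extension, which the prefix decoder keeps pre-shifted so a
 * plain OR yields register indexes 8..15 in 64-bit mode.  The analogous
 *     (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB
 * form does the same for the 'rm' field using REX.B.
 */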
/**
* Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with
* memory/register as the destination.
*
* @param pImpl Pointer to the instruction implementation (assembly).
*/
{
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
if (pImpl != &g_iemAImpl_test)
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
}
}
else
{
/*
* We're accessing memory.
* Note! We're putting the eflags on the stack here so we can commit them
* after the memory.
*/
uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */;
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
break;
}
}
return VINF_SUCCESS;
}
/**
* Common worker for byte instructions like ADD, AND, OR, ++ with a register as
* the destination.
*
* @param pImpl Pointer to the instruction implementation (assembly).
*/
{
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
IEM_MC_BEGIN(3, 0);
IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
}
else
{
/*
* We're accessing memory.
*/
IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
}
return VINF_SUCCESS;
}
/**
* Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with a
* register as the destination.
*
* @param pImpl Pointer to the instruction implementation (assembly).
*/
{
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
}
}
else
{
/*
* We're accessing memory.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
}
}
return VINF_SUCCESS;
}
/**
* Common worker for instructions like ADD, AND, OR, ++ working on AL with
* a byte immediate.
*
* @param pImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/**
* Common worker for instructions like ADD, AND, OR, ++ working on
* AX/EAX/RAX with a word/dword immediate.
*
* @param pImpl Pointer to the instruction implementation (assembly).
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_32BIT:
{
IEM_MC_BEGIN(3, 0);
if (pImpl != &g_iemAImpl_test)
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcodes 0xf1, 0xd6. */
{
IEMOP_MNEMONIC("Invalid");
return IEMOP_RAISE_INVALID_OPCODE();
}
/** @name One byte opcodes.
*
* @{
*/
/** @} */
/** @name Two byte opcodes (first byte 0x0f).
*
* @{
*/
/** Opcode 0x0f 0x00 /0. */
{
IEMOP_MNEMONIC("sldt Rv/Mw");
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
}
else
{
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x00 /1. */
{
IEMOP_MNEMONIC("str Rv/Mw");
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
}
else
{
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x00 /2. */
{
IEMOP_MNEMONIC("lldt Ew");
{
IEM_MC_BEGIN(1, 0);
IEM_MC_END();
}
else
{
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x00 /3. */
{
IEMOP_MNEMONIC("ltr Ew");
{
IEM_MC_BEGIN(1, 0);
IEM_MC_END();
}
else
{
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x00 /4. */
/** Opcode 0x0f 0x00 /5. */
/** Opcode 0x0f 0x00. */
{
{
case 6: return IEMOP_RAISE_INVALID_OPCODE();
case 7: return IEMOP_RAISE_INVALID_OPCODE();
}
}
/** Opcode 0x0f 0x01 /0. */
{
IEMOP_MNEMONIC("sgdt Ms");
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x01 /0. */
{
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x01 /0. */
{
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x01 /0. */
{
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x01 /0. */
{
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x01 /1. */
{
IEMOP_MNEMONIC("sidt Ms");
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x01 /1. */
{
IEMOP_MNEMONIC("monitor");
IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); /** @todo Verify that monitor is allergic to lock prefixes. */
}
/** Opcode 0x0f 0x01 /1. */
{
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_mwait);
}
/** Opcode 0x0f 0x01 /2. */
{
IEMOP_MNEMONIC("lgdt");
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x01 /2. */
{
AssertFailed();
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x01 /2. */
{
AssertFailed();
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x01 /3. */
{
: pIemCpu->enmEffOpSize;
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x01 0xd8. */
/** Opcode 0x0f 0x01 0xd9. */
/** Opcode 0x0f 0x01 0xda. */
/** Opcode 0x0f 0x01 0xdb. */
/** Opcode 0x0f 0x01 0xdc. */
/** Opcode 0x0f 0x01 0xdd. */
/** Opcode 0x0f 0x01 0xde. */
/** Opcode 0x0f 0x01 0xdf. */
/** Opcode 0x0f 0x01 /4. */
{
IEMOP_MNEMONIC("smsw");
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/* Ignore operand size here, memory refs are always 16-bit. */
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0x0f 0x01 /6. */
{
/* The operand size is effectively ignored, all is 16-bit and only the
lower 4 bits are used. */
IEMOP_MNEMONIC("lmsw");
{
IEM_MC_BEGIN(1, 0);
IEM_MC_END();
}
else
{
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x01 /7. */
{
IEMOP_MNEMONIC("invlpg");
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x01 /7. */
{
IEMOP_MNEMONIC("swapgs");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_swapgs);
}
/** Opcode 0x0f 0x01 /7. */
{
return VERR_IEM_INSTR_NOT_IMPLEMENTED;
}
/** Opcode 0x0f 0x01. */
{
{
case 0:
switch (bRm & X86_MODRM_RM_MASK)
{
}
return IEMOP_RAISE_INVALID_OPCODE();
case 1:
switch (bRm & X86_MODRM_RM_MASK)
{
case 0: return FNIEMOP_CALL(iemOp_Grp7_monitor);
}
return IEMOP_RAISE_INVALID_OPCODE();
case 2:
switch (bRm & X86_MODRM_RM_MASK)
{
case 0: return FNIEMOP_CALL(iemOp_Grp7_xgetbv);
}
return IEMOP_RAISE_INVALID_OPCODE();
case 3:
switch (bRm & X86_MODRM_RM_MASK)
{
case 0: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmrun);
}
case 4:
case 5:
return IEMOP_RAISE_INVALID_OPCODE();
case 6:
case 7:
switch (bRm & X86_MODRM_RM_MASK)
{
case 0: return FNIEMOP_CALL(iemOp_Grp7_swapgs);
}
return IEMOP_RAISE_INVALID_OPCODE();
}
}
/** Opcode 0x0f 0x02. */
/** Opcode 0x0f 0x03. */
/** Opcode 0x0f 0x04. */
{
IEMOP_MNEMONIC("syscall");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_syscall);
}
/** Opcode 0x0f 0x05. */
{
IEMOP_MNEMONIC("clts");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clts);
}
/** Opcode 0x0f 0x06. */
{
IEMOP_MNEMONIC("sysret");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sysret);
}
/** Opcode 0x0f 0x08. */
/** Opcode 0x0f 0x09. */
{
IEMOP_MNEMONIC("wbinvd");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS; /* ignore for now */
}
/** Opcode 0x0f 0x0b. */
/** Opcode 0x0f 0x0d. */
{
/* AMD prefetch group, Intel implements this as NOP Ev (and so do we). */
if (!IEM_IS_AMD_CPUID_FEATURES_ANY_PRESENT(X86_CPUID_EXT_FEATURE_EDX_LONG_MODE | X86_CPUID_AMD_FEATURE_EDX_3DNOW,
{
IEMOP_MNEMONIC("GrpP");
return IEMOP_RAISE_INVALID_OPCODE();
}
{
IEMOP_MNEMONIC("GrpP");
return IEMOP_RAISE_INVALID_OPCODE();
}
{
case 2: /* Aliased to /0 for the time being. */
case 4: /* Aliased to /0 for the time being. */
case 5: /* Aliased to /0 for the time being. */
case 6: /* Aliased to /0 for the time being. */
case 7: /* Aliased to /0 for the time being. */
case 0: IEMOP_MNEMONIC("prefetch"); break;
}
IEM_MC_BEGIN(0, 1);
/* Currently a NOP. */
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x0e. */
/** Opcode 0x0f 0x0f 0x0c. */
/** Opcode 0x0f 0x0f 0x0d. */
/** Opcode 0x0f 0x0f 0x1c. */
/** Opcode 0x0f 0x0f 0x1d. */
/** Opcode 0x0f 0x0f 0x8a. */
/** Opcode 0x0f 0x0f 0x8e. */
/** Opcode 0x0f 0x0f 0x90. */
/** Opcode 0x0f 0x0f 0x94. */
/** Opcode 0x0f 0x0f 0x96. */
/** Opcode 0x0f 0x0f 0x97. */
/** Opcode 0x0f 0x0f 0x9a. */
/** Opcode 0x0f 0x0f 0x9e. */
/** Opcode 0x0f 0x0f 0xa0. */
/** Opcode 0x0f 0x0f 0xa4. */
/** Opcode 0x0f 0x0f 0xa6. */
/** Opcode 0x0f 0x0f 0xa7. */
/** Opcode 0x0f 0x0f 0xaa. */
/** Opcode 0x0f 0x0f 0xae. */
/** Opcode 0x0f 0x0f 0xb0. */
/** Opcode 0x0f 0x0f 0xb4. */
/** Opcode 0x0f 0x0f 0xb6. */
/** Opcode 0x0f 0x0f 0xb7. */
/** Opcode 0x0f 0x0f 0xbb. */
/** Opcode 0x0f 0x0f 0xbf. */
/** Opcode 0x0f 0x0f. */
{
{
IEMOP_MNEMONIC("3Dnow");
return IEMOP_RAISE_INVALID_OPCODE();
}
/* This is pretty sparse, use switch instead of table. */
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
switch (b)
{
default:
return IEMOP_RAISE_INVALID_OPCODE();
}
}
/** Opcode 0x0f 0x10. */
/** Opcode 0x0f 0x11. */
/** Opcode 0x0f 0x12. */
/** Opcode 0x0f 0x13. */
/** Opcode 0x0f 0x14. */
/** Opcode 0x0f 0x15. */
/** Opcode 0x0f 0x16. */
/** Opcode 0x0f 0x17. */
/** Opcode 0x0f 0x18. */
{
{
{
case 4: /* Aliased to /0 for the time being according to AMD. */
case 5: /* Aliased to /0 for the time being according to AMD. */
case 6: /* Aliased to /0 for the time being according to AMD. */
case 7: /* Aliased to /0 for the time being according to AMD. */
case 0: IEMOP_MNEMONIC("prefetchNTA m8"); break;
}
IEM_MC_BEGIN(0, 1);
/* Currently a NOP. */
IEM_MC_END();
return VINF_SUCCESS;
}
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x19..0x1f. */
{
{
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 1);
/* Currently a NOP. */
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x20. */
{
/* mod is ignored, as are operand-size overrides. */
IEMOP_MNEMONIC("mov Rd,Cd");
else
{
/* The lock prefix can be used to encode CR8 accesses on some CPUs. */
return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
iCrReg |= 8;
}
switch (iCrReg)
{
case 0: case 2: case 3: case 4: case 8:
break;
default:
return IEMOP_RAISE_INVALID_OPCODE();
}
return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Cd, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB, iCrReg);
}
/** Opcode 0x0f 0x21. */
{
IEMOP_MNEMONIC("mov Rd,Dd");
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x22. */
{
/* mod is ignored, as are operand-size overrides. */
IEMOP_MNEMONIC("mov Cd,Rd");
else
{
/* The lock prefix can be used to encode CR8 accesses on some CPUs. */
return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
iCrReg |= 8;
}
switch (iCrReg)
{
case 0: case 2: case 3: case 4: case 8:
break;
default:
return IEMOP_RAISE_INVALID_OPCODE();
}
return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Cd_Rd, iCrReg, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
/** Opcode 0x0f 0x23. */
{
IEMOP_MNEMONIC("mov Dd,Rd");
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x24. */
{
IEMOP_MNEMONIC("mov Rd,Td");
/* The RM byte is not considered, see testcase. */
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x26. */
{
IEMOP_MNEMONIC("mov Td,Rd");
/* The RM byte is not considered, see testcase. */
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x28. */
/** Opcode 0x0f 0x29. */
/** Opcode 0x0f 0x2a. */
/** Opcode 0x0f 0x2b. */
/** Opcode 0x0f 0x2c. */
/** Opcode 0x0f 0x2d. */
/** Opcode 0x0f 0x2e. */
/** Opcode 0x0f 0x2f. */
/** Opcode 0x0f 0x30. */
{
IEMOP_MNEMONIC("wrmsr");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_wrmsr);
}
/** Opcode 0x0f 0x31. */
{
IEMOP_MNEMONIC("rdtsc");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtsc);
}
/** Opcode 0x0f 0x32. */
{
IEMOP_MNEMONIC("rdmsr");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdmsr);
}
/** Opcode 0x0f 0x33. */
/** Opcode 0x0f 0x34. */
/** Opcode 0x0f 0x35. */
/** Opcode 0x0f 0x37. */
/** Opcode 0x0f 0x38. */
/** Opcode 0x0f 0x3a. */
/** Opcode 0x0f 0x3c (?). */
/**
* Implements a conditional move.
*
* Wish there was an obvious way to do this where we could share and reduce
* code bloat.
*
* @param a_Cnd The conditional "microcode" operation.
*/
{ \
switch (pIemCpu->enmEffOpSize) \
{ \
case IEMMODE_16BIT: \
IEM_MC_BEGIN(0, 1); \
a_Cnd { \
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
} IEM_MC_ENDIF(); \
IEM_MC_ADVANCE_RIP(); \
IEM_MC_END(); \
return VINF_SUCCESS; \
\
case IEMMODE_32BIT: \
IEM_MC_BEGIN(0, 1); \
a_Cnd { \
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
} IEM_MC_ELSE() { \
IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
} IEM_MC_ENDIF(); \
IEM_MC_ADVANCE_RIP(); \
IEM_MC_END(); \
return VINF_SUCCESS; \
\
case IEMMODE_64BIT: \
IEM_MC_BEGIN(0, 1); \
a_Cnd { \
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
} IEM_MC_ENDIF(); \
IEM_MC_ADVANCE_RIP(); \
IEM_MC_END(); \
return VINF_SUCCESS; \
\
} \
} \
else \
{ \
switch (pIemCpu->enmEffOpSize) \
{ \
case IEMMODE_16BIT: \
IEM_MC_BEGIN(0, 2); \
a_Cnd { \
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
} IEM_MC_ENDIF(); \
IEM_MC_ADVANCE_RIP(); \
IEM_MC_END(); \
return VINF_SUCCESS; \
\
case IEMMODE_32BIT: \
IEM_MC_BEGIN(0, 2); \
a_Cnd { \
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
} IEM_MC_ELSE() { \
IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
} IEM_MC_ENDIF(); \
IEM_MC_ADVANCE_RIP(); \
IEM_MC_END(); \
return VINF_SUCCESS; \
\
case IEMMODE_64BIT: \
IEM_MC_BEGIN(0, 2); \
a_Cnd { \
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
} IEM_MC_ENDIF(); \
IEM_MC_ADVANCE_RIP(); \
IEM_MC_END(); \
return VINF_SUCCESS; \
\
} \
} do {} while (0)
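/*
 * Illustrative sketch (assumed, not quoted from the source): the cmovCC
 * handlers below are expected to expand the macro above with the eflags test
 * baked in, roughly like
 *
 *     FNIEMOP_DEF(iemOp_cmovo_Gv_Ev)
 *     {
 *         IEMOP_MNEMONIC("cmovo Gv,Ev");
 *         CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF));
 *     }
 *
 * where CMOV_X stands in for the macro name (its #define line is not shown
 * here) and the IEM_MC_IF_EFL_* statement supplies the a_Cnd argument.
 */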
/** Opcode 0x0f 0x40. */
{
IEMOP_MNEMONIC("cmovo Gv,Ev");
}
/** Opcode 0x0f 0x41. */
{
IEMOP_MNEMONIC("cmovno Gv,Ev");
}
/** Opcode 0x0f 0x42. */
{
IEMOP_MNEMONIC("cmovc Gv,Ev");
}
/** Opcode 0x0f 0x43. */
{
IEMOP_MNEMONIC("cmovnc Gv,Ev");
}
/** Opcode 0x0f 0x44. */
{
IEMOP_MNEMONIC("cmove Gv,Ev");
}
/** Opcode 0x0f 0x45. */
{
IEMOP_MNEMONIC("cmovne Gv,Ev");
}
/** Opcode 0x0f 0x46. */
{
IEMOP_MNEMONIC("cmovbe Gv,Ev");
}
/** Opcode 0x0f 0x47. */
{
IEMOP_MNEMONIC("cmovnbe Gv,Ev");
}
/** Opcode 0x0f 0x48. */
{
IEMOP_MNEMONIC("cmovs Gv,Ev");
}
/** Opcode 0x0f 0x49. */
{
IEMOP_MNEMONIC("cmovns Gv,Ev");
}
/** Opcode 0x0f 0x4a. */
{
IEMOP_MNEMONIC("cmovp Gv,Ev");
}
/** Opcode 0x0f 0x4b. */
{
IEMOP_MNEMONIC("cmovnp Gv,Ev");
}
/** Opcode 0x0f 0x4c. */
{
IEMOP_MNEMONIC("cmovl Gv,Ev");
}
/** Opcode 0x0f 0x4d. */
{
IEMOP_MNEMONIC("cmovnl Gv,Ev");
}
/** Opcode 0x0f 0x4e. */
{
IEMOP_MNEMONIC("cmovle Gv,Ev");
}
/** Opcode 0x0f 0x4f. */
{
IEMOP_MNEMONIC("cmovnle Gv,Ev");
}
/** Opcode 0x0f 0x50. */
/** Opcode 0x0f 0x51. */
/** Opcode 0x0f 0x52. */
/** Opcode 0x0f 0x53. */
/** Opcode 0x0f 0x54. */
/** Opcode 0x0f 0x55. */
/** Opcode 0x0f 0x56. */
/** Opcode 0x0f 0x57. */
/** Opcode 0x0f 0x58. */
/** Opcode 0x0f 0x59. */
/** Opcode 0x0f 0x5a. */
/** Opcode 0x0f 0x5b. */
/** Opcode 0x0f 0x5c. */
/** Opcode 0x0f 0x5d. */
/** Opcode 0x0f 0x5e. */
/** Opcode 0x0f 0x5f. */
/** Opcode 0x0f 0x60. */
/** Opcode 0x0f 0x61. */
/** Opcode 0x0f 0x62. */
/** Opcode 0x0f 0x63. */
/** Opcode 0x0f 0x64. */
/** Opcode 0x0f 0x65. */
/** Opcode 0x0f 0x66. */
/** Opcode 0x0f 0x67. */
/** Opcode 0x0f 0x68. */
/** Opcode 0x0f 0x69. */
/** Opcode 0x0f 0x6a. */
/** Opcode 0x0f 0x6b. */
/** Opcode 0x0f 0x6c. */
/** Opcode 0x0f 0x6d. */
/** Opcode 0x0f 0x6e. */
{
{
IEM_MC_BEGIN(0, 1);
{
/* XMM, greg*/
{
IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
}
else
{
IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
}
}
else
{
/* MMX, greg */
else
IEM_MC_STORE_MREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
}
IEM_MC_END();
}
else
{
/* memory source operand. */
IEM_MC_BEGIN(0, 2);
{
/* XMM, [mem] */
{
IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
}
else
{
IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
}
}
else
{
/* MMX, [mem] */
{
IEM_MC_STORE_MREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
}
else
{
IEM_MC_STORE_MREG_U32_ZX_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
}
}
IEM_MC_END();
}
return VINF_SUCCESS;
}
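/*
 * Reader's note (not part of the original source): the 0x0f 0x6e handler above
 * has four paths - (XMM, greg) and (XMM, [mem]) when the 0x66 operand-size
 * prefix selects the SSE2 movd/movq form, and (MMX, greg) / (MMX, [mem]) for
 * the MMX form - with REX.W presumably selecting between the 64-bit (U64) and
 * the zero-extending 32-bit (U32_ZX) stores seen in each branch.
 */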
/** Opcode 0x0f 0x6f. */
/** Opcode 0x0f 0x70. */
FNIEMOP_STUB(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib); // NEXT
/** Opcode 0x0f 0x71 11/2. */
/** Opcode 0x66 0x0f 0x71 11/2. */
/** Opcode 0x0f 0x71 11/4. */
/** Opcode 0x66 0x0f 0x71 11/4. */
/** Opcode 0x0f 0x71 11/6. */
/** Opcode 0x66 0x0f 0x71 11/6. */
/** Opcode 0x0f 0x71. */
{
return IEMOP_RAISE_INVALID_OPCODE();
{
case 0: case 1: case 3: case 5: case 7:
return IEMOP_RAISE_INVALID_OPCODE();
case 2:
{
default: return IEMOP_RAISE_INVALID_OPCODE();
}
case 4:
{
default: return IEMOP_RAISE_INVALID_OPCODE();
}
case 6:
{
default: return IEMOP_RAISE_INVALID_OPCODE();
}
}
}
/** Opcode 0x0f 0x72 11/2. */
/** Opcode 0x66 0x0f 0x72 11/2. */
/** Opcode 0x0f 0x72 11/4. */
/** Opcode 0x66 0x0f 0x72 11/4. */
/** Opcode 0x0f 0x72 11/6. */
/** Opcode 0x66 0x0f 0x72 11/6. */
/** Opcode 0x0f 0x72. */
{
return IEMOP_RAISE_INVALID_OPCODE();
{
case 0: case 1: case 3: case 5: case 7:
return IEMOP_RAISE_INVALID_OPCODE();
case 2:
{
default: return IEMOP_RAISE_INVALID_OPCODE();
}
case 4:
{
default: return IEMOP_RAISE_INVALID_OPCODE();
}
case 6:
{
default: return IEMOP_RAISE_INVALID_OPCODE();
}
}
}
/** Opcode 0x0f 0x73 11/2. */
/** Opcode 0x66 0x0f 0x73 11/2. */
/** Opcode 0x66 0x0f 0x73 11/3. */
/** Opcode 0x0f 0x73 11/6. */
/** Opcode 0x66 0x0f 0x73 11/6. */
/** Opcode 0x66 0x0f 0x73 11/7. */
/** Opcode 0x0f 0x73. */
{
return IEMOP_RAISE_INVALID_OPCODE();
{
case 0: case 1: case 4: case 5:
return IEMOP_RAISE_INVALID_OPCODE();
case 2:
{
default: return IEMOP_RAISE_INVALID_OPCODE();
}
case 3:
{
default: return IEMOP_RAISE_INVALID_OPCODE();
}
case 6:
{
default: return IEMOP_RAISE_INVALID_OPCODE();
}
case 7:
{
default: return IEMOP_RAISE_INVALID_OPCODE();
}
}
}
/** Opcode 0x0f 0x74. */
/** Opcode 0x0f 0x75. */
/** Opcode 0x0f 0x76. */
/** Opcode 0x0f 0x77. */
/** Opcode 0x0f 0x78. */
/** Opcode 0x0f 0x79. */
/** Opcode 0x0f 0x7c. */
/** Opcode 0x0f 0x7d. */
/** Opcode 0x0f 0x7e. */
/** Opcode 0x0f 0x7f. */
/** Opcode 0x0f 0x80. */
{
IEMOP_MNEMONIC("jo Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
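/*
 * Sketch of the common Jcc pattern (assumed, not quoted from the source): each
 * two-byte Jcc handler fetches a signed 16-bit or 32-bit displacement
 * according to the effective operand size, then from the IEM_MC_IF_EFL_* /
 * IEM_MC_ELSE arms either performs an IEM_MC_REL_JMP_S16/S32 or simply does
 * IEM_MC_ADVANCE_RIP.
 */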
/** Opcode 0x0f 0x81. */
{
IEMOP_MNEMONIC("jno Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x82. */
{
IEMOP_MNEMONIC("jc/jb/jnae Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x83. */
{
IEMOP_MNEMONIC("jnc/jnb/jae Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x84. */
{
IEMOP_MNEMONIC("je/jz Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x85. */
{
IEMOP_MNEMONIC("jne/jnz Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x86. */
{
IEMOP_MNEMONIC("jbe/jna Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x87. */
{
IEMOP_MNEMONIC("jnbe/ja Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x88. */
{
IEMOP_MNEMONIC("js Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x89. */
{
IEMOP_MNEMONIC("jns Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x8a. */
{
IEMOP_MNEMONIC("jp Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x8b. */
{
IEMOP_MNEMONIC("jo Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x8c. */
{
IEMOP_MNEMONIC("jl/jnge Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x8d. */
{
IEMOP_MNEMONIC("jnl/jge Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x8e. */
{
IEMOP_MNEMONIC("jle/jng Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x8f. */
{
IEMOP_MNEMONIC("jnle/jg Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x90. */
{
IEMOP_MNEMONIC("seto Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
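/*
 * Sketch of the common SETcc pattern (assumed, not quoted from the source):
 * with a register target the handler stores 1 or 0 into the low byte of the
 * rm-addressed GPR from the IEM_MC_IF_EFL_* / IEM_MC_ELSE arms; with a memory
 * target it calculates the effective address into a local first and stores the
 * 1/0 byte there, typically via IEM_MC_STORE_MEM_U8.
 */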
/** Opcode 0x0f 0x91. */
{
IEMOP_MNEMONIC("setno Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x92. */
{
IEMOP_MNEMONIC("setc Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x93. */
{
IEMOP_MNEMONIC("setnc Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x94. */
{
IEMOP_MNEMONIC("sete Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x95. */
{
IEMOP_MNEMONIC("setne Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x96. */
{
IEMOP_MNEMONIC("setbe Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x97. */
{
IEMOP_MNEMONIC("setnbe Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x98. */
{
IEMOP_MNEMONIC("sets Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x99. */
{
IEMOP_MNEMONIC("setns Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x9a. */
{
IEMOP_MNEMONIC("setnp Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x9b. */
{
IEMOP_MNEMONIC("setnp Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x9c. */
{
IEMOP_MNEMONIC("setl Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x9d. */
{
IEMOP_MNEMONIC("setnl Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x9e. */
{
IEMOP_MNEMONIC("setle Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x9f. */
{
IEMOP_MNEMONIC("setnle Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
/** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
* any way. AMD says it's "unused", whatever that means. We're
* ignoring for now. */
{
/* register target */
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory target */
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/**
* Common 'push segment-register' helper.
*/
{
if (iReg < X86_SREG_FS)
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
return VINF_SUCCESS;
}
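/*
 * Usage note (assumed): the push fs (0x0f 0xa0) and push gs (0x0f 0xa8)
 * handlers below are expected to forward straight to this helper with
 * X86_SREG_FS respectively X86_SREG_GS as iReg.
 */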
/** Opcode 0x0f 0xa0. */
{
IEMOP_MNEMONIC("push fs");
}
/** Opcode 0x0f 0xa1. */
{
IEMOP_MNEMONIC("pop fs");
}
/** Opcode 0x0f 0xa2. */
{
IEMOP_MNEMONIC("cpuid");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cpuid);
}
/**
* Common worker for iemOp_bt_Ev_Gv, iemOp_btc_Ev_Gv, iemOp_btr_Ev_Gv and
* iemOp_bts_Ev_Gv.
*/
{
IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
{
/* register destination. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/* memory destination. */
if (pImpl->pfnLockedU16)
else /* BT */
{
}
/** @todo test negative bit offsets! */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
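/*
 * Usage note (assumed): iemOp_bt_Ev_Gv, iemOp_bts_Ev_Gv, iemOp_btr_Ev_Gv and
 * iemOp_btc_Ev_Gv are expected to dispatch to this worker, each passing its
 * matching g_iemAImpl_bt / g_iemAImpl_bts / g_iemAImpl_btr / g_iemAImpl_btc
 * implementation table as pImpl (names assumed from the g_iemAImpl_test
 * pattern used elsewhere in this file).  BT has no locked variant, which is
 * what the pfnLockedU16 check in the memory path above keys off.
 */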
/** Opcode 0x0f 0xa3. */
{
IEMOP_MNEMONIC("bt Gv,Gv");
}
/**
* Common worker for iemOp_shrd_Ev_Gv_Ib and iemOp_shld_Ev_Gv_Ib.
*/
{
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(4, 0);
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(4, 0);
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(4, 0);
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/**
* Common worker for iemOp_shrd_Ev_Gv_CL and iemOp_shld_Ev_Gv_CL.
*/
{
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(4, 0);
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(4, 0);
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(4, 0);
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0x0f 0xa4. */
{
IEMOP_MNEMONIC("shld Ev,Gv,Ib");
}
/** Opcode 0x0f 0xa5. */
{
IEMOP_MNEMONIC("shld Ev,Gv,CL");
}
/** Opcode 0x0f 0xa8. */
{
IEMOP_MNEMONIC("push gs");
}
/** Opcode 0x0f 0xa9. */
{
IEMOP_MNEMONIC("pop gs");
}
/** Opcode 0x0f 0xaa. */
/** Opcode 0x0f 0xab. */
{
IEMOP_MNEMONIC("bts Ev,Gv");
}
/** Opcode 0x0f 0xac. */
{
IEMOP_MNEMONIC("shrd Ev,Gv,Ib");
}
/** Opcode 0x0f 0xad. */
{
IEMOP_MNEMONIC("shrd Ev,Gv,CL");
}
/** Opcode 0x0f 0xae mem/0. */
{
IEMOP_MNEMONIC("fxsave m512");
return IEMOP_RAISE_INVALID_OPCODE();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x0f 0xae mem/1. */
{
IEMOP_MNEMONIC("fxrstor m512");
return IEMOP_RAISE_INVALID_OPCODE();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x0f 0xae mem/2. */
/** Opcode 0x0f 0xae mem/3. */
/** Opcode 0x0f 0xae mem/4. */
/** Opcode 0x0f 0xae mem/5. */
/** Opcode 0x0f 0xae mem/6. */
/** Opcode 0x0f 0xae mem/7. */
/** Opcode 0x0f 0xae 11b/5. */
{
IEMOP_MNEMONIC("lfence");
return IEMOP_RAISE_INVALID_OPCODE();
IEM_MC_BEGIN(0, 0);
else
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x0f 0xae 11b/6. */
{
IEMOP_MNEMONIC("mfence");
return IEMOP_RAISE_INVALID_OPCODE();
IEM_MC_BEGIN(0, 0);
else
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x0f 0xae 11b/7. */
{
IEMOP_MNEMONIC("sfence");
return IEMOP_RAISE_INVALID_OPCODE();
IEM_MC_BEGIN(0, 0);
else
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xf3 0x0f 0xae 11b/0. */
/** Opcode 0xf3 0x0f 0xae 11b/1. */
/** Opcode 0xf3 0x0f 0xae 11b/2. */
/** Opcode 0xf3 0x0f 0xae 11b/3. */
/** Opcode 0x0f 0xae. */
{
{
{
}
}
else
{
switch (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_LOCK))
{
case 0:
{
case 0: return IEMOP_RAISE_INVALID_OPCODE();
case 1: return IEMOP_RAISE_INVALID_OPCODE();
case 2: return IEMOP_RAISE_INVALID_OPCODE();
case 3: return IEMOP_RAISE_INVALID_OPCODE();
case 4: return IEMOP_RAISE_INVALID_OPCODE();
}
break;
case IEM_OP_PRF_REPZ:
{
case 4: return IEMOP_RAISE_INVALID_OPCODE();
case 5: return IEMOP_RAISE_INVALID_OPCODE();
case 6: return IEMOP_RAISE_INVALID_OPCODE();
case 7: return IEMOP_RAISE_INVALID_OPCODE();
}
break;
default:
return IEMOP_RAISE_INVALID_OPCODE();
}
}
}
/** Opcode 0x0f 0xaf. */
{
IEMOP_MNEMONIC("imul Gv,Ev");
}
/** Opcode 0x0f 0xb0. */
{
IEMOP_MNEMONIC("cmpxchg Eb,Gb");
{
IEM_MC_BEGIN(4, 0);
IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
}
else
{
IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0xb1. */
{
IEMOP_MNEMONIC("cmpxchg Ev,Gv");
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(4, 0);
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(4, 0);
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(4, 0);
#ifdef RT_ARCH_X86
#else
#endif
#ifdef RT_ARCH_X86
IEM_MC_REF_GREG_U64(pu64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
#else
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
#endif
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
#ifdef RT_ARCH_X86
#else
#endif
#ifdef RT_ARCH_X86
IEM_MC_REF_GREG_U64(pu64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
#else
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
#endif
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
if (IEM_IS_GUEST_CPU_AMD(pIemCpu)) /** @todo testcase: rev 3.15 of the amd manuals claims it only loads a 32-bit greg. */
else
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0x0f 0xb2. */
{
IEMOP_MNEMONIC("lss Gv,Mp");
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0xb3. */
{
IEMOP_MNEMONIC("btr Ev,Gv");
}
/** Opcode 0x0f 0xb4. */
{
IEMOP_MNEMONIC("lfs Gv,Mp");
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0xb5. */
{
IEMOP_MNEMONIC("lgs Gv,Mp");
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0xb6. */
{
IEMOP_MNEMONIC("movzx Gv,Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/*
* We're loading a register from memory.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0x0f 0xb7. */
{
IEMOP_MNEMONIC("movzx Gv,Ew");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/** @todo Not entirely sure how the operand size prefix is handled here,
* assuming that it will be ignored. Would be nice to have a few
* tests for this. */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
{
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
}
}
else
{
/*
* We're loading a register from memory.
*/
{
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
}
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0xb8. */
/** Opcode 0x0f 0xb9. */
{
Log(("iemOp_Grp10 -> #UD\n"));
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0xba. */
{
{
case 0: case 1: case 2: case 3:
return IEMOP_RAISE_INVALID_OPCODE();
}
IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
{
/* register destination. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/* memory destination. */
if (pImpl->pfnLockedU16)
else /* BT */
{
}
/** @todo test negative bit offsets! */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
else
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
else
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
else
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0x0f 0xbb. */
{
IEMOP_MNEMONIC("btc Ev,Gv");
}
/** Opcode 0x0f 0xbc. */
{
IEMOP_MNEMONIC("bsf Gv,Ev");
IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
}
/** Opcode 0x0f 0xbd. */
{
IEMOP_MNEMONIC("bsr Gv,Ev");
IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
}
/** Opcode 0x0f 0xbe. */
{
IEMOP_MNEMONIC("movsx Gv,Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/*
* We're loading a register from memory.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0x0f 0xbf. */
{
IEMOP_MNEMONIC("movsx Gv,Ew");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/** @todo Not entirely sure how the operand size prefix is handled here,
* assuming that it will be ignored. Would be nice to have a few
* tests for this. */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
{
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
}
}
else
{
/*
* We're loading a register from memory.
*/
{
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
}
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0xc0. */
{
IEMOP_MNEMONIC("xadd Eb,Gb");
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
IEM_MC_BEGIN(3, 0);
IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
}
else
{
/*
* We're accessing memory.
*/
IEM_MC_FETCH_GREG_U8(u8RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8RegCopy);
IEM_MC_END();
return VINF_SUCCESS;
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0xc1. */
{
IEMOP_MNEMONIC("xadd Ev,Gv");
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/*
* We're accessing memory.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_FETCH_GREG_U16(u16RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16RegCopy);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_FETCH_GREG_U32(u32RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32RegCopy);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_FETCH_GREG_U64(u64RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64RegCopy);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0x0f 0xc2. */
/** Opcode 0x0f 0xc3. */
/** Opcode 0x0f 0xc4. */
/** Opcode 0x0f 0xc5. */
/** Opcode 0x0f 0xc6. */
/** Opcode 0x0f 0xc7 !11/1. */
{
IEMOP_MNEMONIC("cmpxchg8b Mq");
else
/** @todo Testcase: Check effect of cmpxchg8b on bits 63:32 in rax and rdx. */
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode REX.W 0x0f 0xc7 !11/1. */
/** Opcode 0x0f 0xc7 11/6. */
/** Opcode 0x0f 0xc7 !11/6. */
/** Opcode 0x66 0x0f 0xc7 !11/6. */
/** Opcode 0xf3 0x0f 0xc7 !11/6. */
/** Opcode [0xf3] 0x0f 0xc7 !11/7. */
/** Opcode 0x0f 0xc7. */
{
/** @todo Testcase: Check mixing 0x66 and 0xf3. Check the effect of 0xf2. */
{
case 0: case 2: case 3: case 4: case 5:
return IEMOP_RAISE_INVALID_OPCODE();
case 1:
/** @todo Testcase: Check prefix effects on cmpxchg8b/16b. */
|| (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))) /** @todo Testcase: AMD seems to express a different idea here wrt prefixes. */
return IEMOP_RAISE_INVALID_OPCODE();
if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
case 6:
{
case 0:
case IEM_OP_PRF_SIZE_OP:
case IEM_OP_PRF_REPZ:
default:
return IEMOP_RAISE_INVALID_OPCODE();
}
case 7:
{
case 0:
case IEM_OP_PRF_REPZ:
default:
return IEMOP_RAISE_INVALID_OPCODE();
}
}
}
/**
* Common 'bswap register' helper.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(1, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(1, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(1, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
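/*
 * Usage note (assumed): the per-register bswap handlers below are expected to
 * forward to this helper with the general register index, folding in REX.B,
 * e.g. something along the lines of
 *     return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xAX | pIemCpu->uRexB);
 * for opcode 0x0f 0xc8.  The helper name iemOpCommonBswapGReg is assumed; only
 * its body is shown above.
 */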
/** Opcode 0x0f 0xc8. */
{
IEMOP_MNEMONIC("bswap rAX/r8");
/* Note! Intel manuals state that R8-R15 can be accessed by using a REX.X
prefix. REX.B appears to be the correct prefix, however. For a parallel
case, see iemOp_mov_AL_Ib and iemOp_mov_eAX_Iv. */
}
/** Opcode 0x0f 0xc9. */
{
IEMOP_MNEMONIC("bswap rCX/r9");
}
/** Opcode 0x0f 0xca. */
{
IEMOP_MNEMONIC("bswap rDX/r9");
}
/** Opcode 0x0f 0xcb. */
{
IEMOP_MNEMONIC("bswap rBX/r9");
}
/** Opcode 0x0f 0xcc. */
{
IEMOP_MNEMONIC("bswap rSP/r12");
}
/** Opcode 0x0f 0xcd. */
{
IEMOP_MNEMONIC("bswap rBP/r13");
}
/** Opcode 0x0f 0xce. */
{
IEMOP_MNEMONIC("bswap rSI/r14");
}
/** Opcode 0x0f 0xcf. */
{
IEMOP_MNEMONIC("bswap rDI/r15");
}
/** Opcode 0x0f 0xd0. */
/** Opcode 0x0f 0xd1. */
/** Opcode 0x0f 0xd2. */
/** Opcode 0x0f 0xd3. */
/** Opcode 0x0f 0xd4. */
/** Opcode 0x0f 0xd5. */
/** Opcode 0x0f 0xd6. */
/** Opcode 0x0f 0xd7. */
/** Opcode 0x0f 0xd8. */
/** Opcode 0x0f 0xd9. */
/** Opcode 0x0f 0xda. */
/** Opcode 0x0f 0xdb. */
/** Opcode 0x0f 0xdc. */
/** Opcode 0x0f 0xdd. */
/** Opcode 0x0f 0xde. */
/** Opcode 0x0f 0xdf. */
/** Opcode 0x0f 0xe0. */
/** Opcode 0x0f 0xe1. */
/** Opcode 0x0f 0xe2. */
/** Opcode 0x0f 0xe3. */
/** Opcode 0x0f 0xe4. */
/** Opcode 0x0f 0xe5. */
/** Opcode 0x0f 0xe6. */
/** Opcode 0x0f 0xe7. */
/** Opcode 0x0f 0xe8. */
/** Opcode 0x0f 0xe9. */
/** Opcode 0x0f 0xea. */
/** Opcode 0x0f 0xeb. */
/** Opcode 0x0f 0xec. */
/** Opcode 0x0f 0xed. */
/** Opcode 0x0f 0xee. */
/** Opcode 0x0f 0xef. */
/** Opcode 0x0f 0xf0. */
/** Opcode 0x0f 0xf1. */
/** Opcode 0x0f 0xf2. */
/** Opcode 0x0f 0xf3. */
/** Opcode 0x0f 0xf4. */
/** Opcode 0x0f 0xf5. */
/** Opcode 0x0f 0xf6. */
/** Opcode 0x0f 0xf7. */
/** Opcode 0x0f 0xf8. */
/** Opcode 0x0f 0xf9. */
/** Opcode 0x0f 0xfa. */
/** Opcode 0x0f 0xfb. */
/** Opcode 0x0f 0xfc. */
/** Opcode 0x0f 0xfd. */
/** Opcode 0x0f 0xfe. */
{
/* 0x00 */ iemOp_Grp6,
/* 0x01 */ iemOp_Grp7,
/* 0x02 */ iemOp_lar_Gv_Ew,
/* 0x03 */ iemOp_lsl_Gv_Ew,
/* 0x04 */ iemOp_Invalid,
/* 0x05 */ iemOp_syscall,
/* 0x06 */ iemOp_clts,
/* 0x07 */ iemOp_sysret,
/* 0x08 */ iemOp_invd,
/* 0x09 */ iemOp_wbinvd,
/* 0x0a */ iemOp_Invalid,
/* 0x0b */ iemOp_ud2,
/* 0x0c */ iemOp_Invalid,
/* 0x0d */ iemOp_nop_Ev_GrpP,
/* 0x0e */ iemOp_femms,
/* 0x0f */ iemOp_3Dnow,
/* 0x13 */ iemOp_movlps_Mq_Vq__movlpd_Mq_Vq,
/* 0x14 */ iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq,
/* 0x15 */ iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq,
/* 0x17 */ iemOp_movhps_Mq_Vq__movhpd_Mq_Vq,
/* 0x18 */ iemOp_prefetch_Grp16,
/* 0x19 */ iemOp_nop_Ev,
/* 0x1a */ iemOp_nop_Ev,
/* 0x1b */ iemOp_nop_Ev,
/* 0x1c */ iemOp_nop_Ev,
/* 0x1d */ iemOp_nop_Ev,
/* 0x1e */ iemOp_nop_Ev,
/* 0x1f */ iemOp_nop_Ev,
/* 0x20 */ iemOp_mov_Rd_Cd,
/* 0x21 */ iemOp_mov_Rd_Dd,
/* 0x22 */ iemOp_mov_Cd_Rd,
/* 0x23 */ iemOp_mov_Dd_Rd,
/* 0x24 */ iemOp_mov_Rd_Td,
/* 0x25 */ iemOp_Invalid,
/* 0x26 */ iemOp_mov_Td_Rd,
/* 0x27 */ iemOp_Invalid,
/* 0x28 */ iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd,
/* 0x29 */ iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd,
/* 0x2b */ iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd,
/* 0x2e */ iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd,
/* 0x2f */ iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd,
/* 0x30 */ iemOp_wrmsr,
/* 0x31 */ iemOp_rdtsc,
/* 0x32 */ iemOp_rdmsr,
/* 0x33 */ iemOp_rdpmc,
/* 0x34 */ iemOp_sysenter,
/* 0x35 */ iemOp_sysexit,
/* 0x36 */ iemOp_Invalid,
/* 0x37 */ iemOp_getsec,
/* 0x38 */ iemOp_3byte_Esc_A4,
/* 0x39 */ iemOp_Invalid,
/* 0x3a */ iemOp_3byte_Esc_A5,
/* 0x3b */ iemOp_Invalid,
/* 0x3d */ iemOp_Invalid,
/* 0x3e */ iemOp_Invalid,
/* 0x3f */ iemOp_Invalid,
/* 0x40 */ iemOp_cmovo_Gv_Ev,
/* 0x41 */ iemOp_cmovno_Gv_Ev,
/* 0x42 */ iemOp_cmovc_Gv_Ev,
/* 0x43 */ iemOp_cmovnc_Gv_Ev,
/* 0x44 */ iemOp_cmove_Gv_Ev,
/* 0x45 */ iemOp_cmovne_Gv_Ev,
/* 0x46 */ iemOp_cmovbe_Gv_Ev,
/* 0x47 */ iemOp_cmovnbe_Gv_Ev,
/* 0x48 */ iemOp_cmovs_Gv_Ev,
/* 0x49 */ iemOp_cmovns_Gv_Ev,
/* 0x4a */ iemOp_cmovp_Gv_Ev,
/* 0x4b */ iemOp_cmovnp_Gv_Ev,
/* 0x4c */ iemOp_cmovl_Gv_Ev,
/* 0x4d */ iemOp_cmovnl_Gv_Ev,
/* 0x4e */ iemOp_cmovle_Gv_Ev,
/* 0x4f */ iemOp_cmovnle_Gv_Ev,
/* 0x50 */ iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd,
/* 0x52 */ iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss,
/* 0x53 */ iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss,
/* 0x54 */ iemOp_andps_Vps_Wps__andpd_Wpd_Vpd,
/* 0x55 */ iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd,
/* 0x56 */ iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd,
/* 0x57 */ iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd,
/* 0x60 */ iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq,
/* 0x61 */ iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq,
/* 0x62 */ iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq,
/* 0x63 */ iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq,
/* 0x64 */ iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq,
/* 0x65 */ iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq,
/* 0x66 */ iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq,
/* 0x67 */ iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq,
/* 0x68 */ iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq,
/* 0x69 */ iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq,
/* 0x6a */ iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq,
/* 0x6b */ iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq,
/* 0x6c */ iemOp_punpcklqdq_Vdq_Wdq,
/* 0x6d */ iemOp_punpckhqdq_Vdq_Wdq,
/* 0x6e */ iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey,
/* 0x6f */ iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq,
/* 0x71 */ iemOp_Grp12,
/* 0x72 */ iemOp_Grp13,
/* 0x73 */ iemOp_Grp14,
/* 0x74 */ iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq,
/* 0x75 */ iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq,
/* 0x76 */ iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq,
/* 0x77 */ iemOp_emms,
/* 0x78 */ iemOp_vmread_AmdGrp17,
/* 0x79 */ iemOp_vmwrite,
/* 0x7a */ iemOp_Invalid,
/* 0x7b */ iemOp_Invalid,
/* 0x7c */ iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps,
/* 0x7d */ iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps,
/* 0x7e */ iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq,
/* 0x80 */ iemOp_jo_Jv,
/* 0x81 */ iemOp_jno_Jv,
/* 0x82 */ iemOp_jc_Jv,
/* 0x83 */ iemOp_jnc_Jv,
/* 0x84 */ iemOp_je_Jv,
/* 0x85 */ iemOp_jne_Jv,
/* 0x86 */ iemOp_jbe_Jv,
/* 0x87 */ iemOp_jnbe_Jv,
/* 0x88 */ iemOp_js_Jv,
/* 0x89 */ iemOp_jns_Jv,
/* 0x8a */ iemOp_jp_Jv,
/* 0x8b */ iemOp_jnp_Jv,
/* 0x8c */ iemOp_jl_Jv,
/* 0x8d */ iemOp_jnl_Jv,
/* 0x8e */ iemOp_jle_Jv,
/* 0x8f */ iemOp_jnle_Jv,
/* 0x90 */ iemOp_seto_Eb,
/* 0x91 */ iemOp_setno_Eb,
/* 0x92 */ iemOp_setc_Eb,
/* 0x93 */ iemOp_setnc_Eb,
/* 0x94 */ iemOp_sete_Eb,
/* 0x95 */ iemOp_setne_Eb,
/* 0x96 */ iemOp_setbe_Eb,
/* 0x97 */ iemOp_setnbe_Eb,
/* 0x98 */ iemOp_sets_Eb,
/* 0x99 */ iemOp_setns_Eb,
/* 0x9a */ iemOp_setp_Eb,
/* 0x9b */ iemOp_setnp_Eb,
/* 0x9c */ iemOp_setl_Eb,
/* 0x9d */ iemOp_setnl_Eb,
/* 0x9e */ iemOp_setle_Eb,
/* 0x9f */ iemOp_setnle_Eb,
/* 0xa0 */ iemOp_push_fs,
/* 0xa1 */ iemOp_pop_fs,
/* 0xa2 */ iemOp_cpuid,
/* 0xa3 */ iemOp_bt_Ev_Gv,
/* 0xa4 */ iemOp_shld_Ev_Gv_Ib,
/* 0xa5 */ iemOp_shld_Ev_Gv_CL,
/* 0xa6 */ iemOp_Invalid,
/* 0xa7 */ iemOp_Invalid,
/* 0xa8 */ iemOp_push_gs,
/* 0xa9 */ iemOp_pop_gs,
/* 0xaa */ iemOp_rsm,
/* 0xab */ iemOp_bts_Ev_Gv,
/* 0xac */ iemOp_shrd_Ev_Gv_Ib,
/* 0xad */ iemOp_shrd_Ev_Gv_CL,
/* 0xae */ iemOp_Grp15,
/* 0xaf */ iemOp_imul_Gv_Ev,
/* 0xb0 */ iemOp_cmpxchg_Eb_Gb,
/* 0xb1 */ iemOp_cmpxchg_Ev_Gv,
/* 0xb2 */ iemOp_lss_Gv_Mp,
/* 0xb3 */ iemOp_btr_Ev_Gv,
/* 0xb4 */ iemOp_lfs_Gv_Mp,
/* 0xb5 */ iemOp_lgs_Gv_Mp,
/* 0xb6 */ iemOp_movzx_Gv_Eb,
/* 0xb7 */ iemOp_movzx_Gv_Ew,
/* 0xb8 */ iemOp_popcnt_Gv_Ev_jmpe,
/* 0xb9 */ iemOp_Grp10,
/* 0xba */ iemOp_Grp8,
/* 0xbb */ iemOp_btc_Ev_Gv,
/* 0xbc */ iemOp_bsf_Gv_Ev,
/* 0xbd */ iemOp_bsr_Gv_Ev,
/* 0xbe */ iemOp_movsx_Gv_Eb,
/* 0xbf */ iemOp_movsx_Gv_Ew,
/* 0xc0 */ iemOp_xadd_Eb_Gb,
/* 0xc1 */ iemOp_xadd_Ev_Gv,
/* 0xc3 */ iemOp_movnti_My_Gy,
/* 0xc4 */ iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib,
/* 0xc5 */ iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib,
/* 0xc6 */ iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib,
/* 0xc7 */ iemOp_Grp9,
/* 0xc8 */ iemOp_bswap_rAX_r8,
/* 0xc9 */ iemOp_bswap_rCX_r9,
/* 0xca */ iemOp_bswap_rDX_r10,
/* 0xcb */ iemOp_bswap_rBX_r11,
/* 0xcc */ iemOp_bswap_rSP_r12,
/* 0xcd */ iemOp_bswap_rBP_r13,
/* 0xce */ iemOp_bswap_rSI_r14,
/* 0xcf */ iemOp_bswap_rDI_r15,
/* 0xd0 */ iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps,
/* 0xd1 */ iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq,
/* 0xd2 */ iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq,
/* 0xd3 */ iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq,
/* 0xd4 */ iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq,
/* 0xd5 */ iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq,
/* 0xd6 */ iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq,
/* 0xd7 */ iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq,
/* 0xd8 */ iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq,
/* 0xd9 */ iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq,
/* 0xda */ iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq,
/* 0xdb */ iemOp_pand_Pq_Qq__pand_Vdq_Wdq,
/* 0xdc */ iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq,
/* 0xdd */ iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq,
/* 0xde */ iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq,
/* 0xdf */ iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq,
/* 0xe0 */ iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq,
/* 0xe1 */ iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq,
/* 0xe2 */ iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq,
/* 0xe3 */ iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq,
/* 0xe4 */ iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq,
/* 0xe5 */ iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq,
/* 0xe7 */ iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq,
/* 0xe8 */ iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq,
/* 0xe9 */ iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq,
/* 0xea */ iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq,
/* 0xeb */ iemOp_por_Pq_Qq__por_Vdq_Wdq,
/* 0xec */ iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq,
/* 0xed */ iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq,
/* 0xee */ iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq,
/* 0xef */ iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq,
/* 0xf0 */ iemOp_lddqu_Vdq_Mdq,
/* 0xf1 */ iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq,
/* 0xf2 */ iemOp_psld_Pq_Qq__pslld_Vdq_Wdq,
/* 0xf3 */ iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq,
/* 0xf4 */ iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq,
/* 0xf5 */ iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq,
/* 0xf6 */ iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq,
/* 0xf7 */ iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq,
/* 0xf8 */ iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq,
/* 0xf9 */ iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq,
/* 0xfa */ iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq,
/* 0xfb */ iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq,
/* 0xfc */ iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq,
/* 0xfd */ iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq,
/* 0xfe */ iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq,
/* 0xff */ iemOp_Invalid
};
/** @} */
/** @name One byte opcodes.
*
* @{
*/
/** Opcode 0x00. */
{
IEMOP_MNEMONIC("add Eb,Gb");
}
/** Opcode 0x01. */
{
IEMOP_MNEMONIC("add Ev,Gv");
}
/** Opcode 0x02. */
{
IEMOP_MNEMONIC("add Gb,Eb");
}
/** Opcode 0x03. */
{
IEMOP_MNEMONIC("add Gv,Ev");
}
/** Opcode 0x04. */
{
IEMOP_MNEMONIC("add al,Ib");
}
/** Opcode 0x05. */
{
IEMOP_MNEMONIC("add rAX,Iz");
}
/** Opcode 0x06. */
{
IEMOP_MNEMONIC("push es");
}
/** Opcode 0x07. */
{
IEMOP_MNEMONIC("pop es");
}
/** Opcode 0x08. */
{
IEMOP_MNEMONIC("or Eb,Gb");
}
/** Opcode 0x09. */
{
IEMOP_MNEMONIC("or Ev,Gv ");
}
/** Opcode 0x0a. */
{
IEMOP_MNEMONIC("or Gb,Eb");
}
/** Opcode 0x0b. */
{
IEMOP_MNEMONIC("or Gv,Ev");
}
/** Opcode 0x0c. */
{
IEMOP_MNEMONIC("or al,Ib");
}
/** Opcode 0x0d. */
{
IEMOP_MNEMONIC("or rAX,Iz");
}
/** Opcode 0x0e. */
{
IEMOP_MNEMONIC("push cs");
}
/** Opcode 0x0f. */
{
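/* Two-byte escape: fetch the next opcode byte and dispatch it via the two-byte opcode map. */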
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnTwoByteMap[b]);
}
/** Opcode 0x10. */
{
IEMOP_MNEMONIC("adc Eb,Gb");
}
/** Opcode 0x11. */
{
IEMOP_MNEMONIC("adc Ev,Gv");
}
/** Opcode 0x12. */
{
IEMOP_MNEMONIC("adc Gb,Eb");
}
/** Opcode 0x13. */
{
IEMOP_MNEMONIC("adc Gv,Ev");
}
/** Opcode 0x14. */
{
IEMOP_MNEMONIC("adc al,Ib");
}
/** Opcode 0x15. */
{
IEMOP_MNEMONIC("adc rAX,Iz");
}
/** Opcode 0x16. */
{
IEMOP_MNEMONIC("push ss");
}
/** Opcode 0x17. */
{
IEMOP_MNEMONIC("pop ss");
}
/** Opcode 0x18. */
{
IEMOP_MNEMONIC("sbb Eb,Gb");
}
/** Opcode 0x19. */
{
IEMOP_MNEMONIC("sbb Ev,Gv");
}
/** Opcode 0x1a. */
{
IEMOP_MNEMONIC("sbb Gb,Eb");
}
/** Opcode 0x1b. */
{
IEMOP_MNEMONIC("sbb Gv,Ev");
}
/** Opcode 0x1c. */
{
IEMOP_MNEMONIC("sbb al,Ib");
}
/** Opcode 0x1d. */
{
IEMOP_MNEMONIC("sbb rAX,Iz");
}
/** Opcode 0x1e. */
{
IEMOP_MNEMONIC("push ds");
}
/** Opcode 0x1f. */
{
IEMOP_MNEMONIC("pop ds");
}
/** Opcode 0x20. */
{
IEMOP_MNEMONIC("and Eb,Gb");
}
/** Opcode 0x21. */
{
IEMOP_MNEMONIC("and Ev,Gv");
}
/** Opcode 0x22. */
{
IEMOP_MNEMONIC("and Gb,Eb");
}
/** Opcode 0x23. */
{
IEMOP_MNEMONIC("and Gv,Ev");
}
/** Opcode 0x24. */
{
IEMOP_MNEMONIC("and al,Ib");
}
/** Opcode 0x25. */
{
IEMOP_MNEMONIC("and rAX,Iz");
}
/** Opcode 0x26. */
{
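/* Prefix bytes like this one record their effect on the decoder state and then
re-dispatch the following byte through the one-byte opcode map, so decoding
simply continues with the next byte. */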
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg es");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x27. */
/** Opcode 0x28. */
{
IEMOP_MNEMONIC("sub Eb,Gb");
}
/** Opcode 0x29. */
{
IEMOP_MNEMONIC("sub Ev,Gv");
}
/** Opcode 0x2a. */
{
IEMOP_MNEMONIC("sub Gb,Eb");
}
/** Opcode 0x2b. */
{
IEMOP_MNEMONIC("sub Gv,Ev");
}
/** Opcode 0x2c. */
{
IEMOP_MNEMONIC("sub al,Ib");
}
/** Opcode 0x2d. */
{
IEMOP_MNEMONIC("sub rAX,Iz");
}
/** Opcode 0x2e. */
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg cs");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x2f. */
/** Opcode 0x30. */
{
IEMOP_MNEMONIC("xor Eb,Gb");
}
/** Opcode 0x31. */
{
IEMOP_MNEMONIC("xor Ev,Gv");
}
/** Opcode 0x32. */
{
IEMOP_MNEMONIC("xor Gb,Eb");
}
/** Opcode 0x33. */
{
IEMOP_MNEMONIC("xor Gv,Ev");
}
/** Opcode 0x34. */
{
IEMOP_MNEMONIC("xor al,Ib");
}
/** Opcode 0x35. */
{
IEMOP_MNEMONIC("xor rAX,Iz");
}
/** Opcode 0x36. */
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ss");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x37. */
/** Opcode 0x38. */
{
IEMOP_MNEMONIC("cmp Eb,Gb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
}
/** Opcode 0x39. */
{
IEMOP_MNEMONIC("cmp Ev,Gv");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
}
/** Opcode 0x3a. */
{
IEMOP_MNEMONIC("cmp Gb,Eb");
}
/** Opcode 0x3b. */
{
IEMOP_MNEMONIC("cmp Gv,Ev");
}
/** Opcode 0x3c. */
{
IEMOP_MNEMONIC("cmp al,Ib");
}
/** Opcode 0x3d. */
{
IEMOP_MNEMONIC("cmp rAX,Iz");
}
/** Opcode 0x3e. */
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ds");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x3f. */
/**
 * Common worker for the 'inc/dec register' encodings (opcodes 0x40 thru 0x4f
 * when they are not consumed as REX prefixes).
 */
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(2, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(2, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(2, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
return VINF_SUCCESS;
}
/** Opcode 0x40. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eAX");
}
/** Opcode 0x41. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.b");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eCX");
}
/** Opcode 0x42. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.x");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eDX");
}
/** Opcode 0x43. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bx");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eBX");
}
/** Opcode 0x44. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.r");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eSP");
}
/** Opcode 0x45. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rb");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eBP");
}
/** Opcode 0x46. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rx");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eSI");
}
/** Opcode 0x47. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbx");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eDI");
}
/** Opcode 0x48. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.w");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eAX");
}
/** Opcode 0x49. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bw");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eCX");
}
/** Opcode 0x4a. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.xw");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eDX");
}
/** Opcode 0x4b. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bxw");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eBX");
}
/** Opcode 0x4c. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rw");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eSP");
}
/** Opcode 0x4d. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbw");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eBP");
}
/** Opcode 0x4e. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rxw");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eSI");
}
/** Opcode 0x4f. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbxw");
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eDI");
}
/**
* Common 'push register' helper.
*/
{
{
}
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
return VINF_SUCCESS;
}
/** Opcode 0x50. */
{
IEMOP_MNEMONIC("push rAX");
}
/** Opcode 0x51. */
{
IEMOP_MNEMONIC("push rCX");
}
/** Opcode 0x52. */
{
IEMOP_MNEMONIC("push rDX");
}
/** Opcode 0x53. */
{
IEMOP_MNEMONIC("push rBX");
}
/** Opcode 0x54. */
{
IEMOP_MNEMONIC("push rSP");
}
/** Opcode 0x55. */
{
IEMOP_MNEMONIC("push rBP");
}
/** Opcode 0x56. */
{
IEMOP_MNEMONIC("push rSI");
}
/** Opcode 0x57. */
{
IEMOP_MNEMONIC("push rDI");
}
/**
* Common 'pop register' helper.
*/
{
{
}
/** @todo How does this code handle iReg==X86_GREG_xSP? How does a real CPU
* handle it, for that matter? (The Intel pseudo code hints that the popped
* value is incremented by the stack item size.) Test it, both encodings
* and all three register sizes. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
return VINF_SUCCESS;
}
/** Opcode 0x58. */
{
IEMOP_MNEMONIC("pop rAX");
}
/** Opcode 0x59. */
{
IEMOP_MNEMONIC("pop rCX");
}
/** Opcode 0x5a. */
{
IEMOP_MNEMONIC("pop rDX");
}
/** Opcode 0x5b. */
{
IEMOP_MNEMONIC("pop rBX");
}
/** Opcode 0x5c. */
{
IEMOP_MNEMONIC("pop rSP");
}
/** Opcode 0x5d. */
{
IEMOP_MNEMONIC("pop rBP");
}
/** Opcode 0x5e. */
{
IEMOP_MNEMONIC("pop rSI");
}
/** Opcode 0x5f. */
{
IEMOP_MNEMONIC("pop rDI");
}
/** Opcode 0x60. */
{
IEMOP_MNEMONIC("pusha");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_16);
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_32);
}
/** Opcode 0x61. */
{
IEMOP_MNEMONIC("popa");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_16);
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_32);
}
/** Opcode 0x62. */
/** Opcode 0x63 - non-64-bit modes. */
/** Opcode 0x63.
* @note This is a weird one. It works like a regular move instruction if
* REX.W isn't set, at least according to AMD docs (rev 3.15, 2009-11).
* @todo This definitely needs a testcase to verify the odd cases. */
{
IEMOP_MNEMONIC("movsxd Gv,Ev");
{
/*
* Register to register.
*/
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
}
else
{
/*
* We're loading a register from memory.
*/
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x64. */
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg fs");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x65. */
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg gs");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x66. */
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("op size");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x67. */
{
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("addr size");
switch (pIemCpu->enmDefAddrMode)
{
default: AssertFailed();
}
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x68. */
{
IEMOP_MNEMONIC("push Iz");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
IEM_MC_BEGIN(0,0);
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_32BIT:
{
IEM_MC_BEGIN(0,0);
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
{
IEM_MC_BEGIN(0,0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0x69. */
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
{
/* register operand */
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
IEM_MC_END();
}
else
{
/* memory operand */
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
IEM_MC_END();
}
return VINF_SUCCESS;
}
case IEMMODE_32BIT:
{
{
/* register operand */
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
IEM_MC_END();
}
else
{
/* memory operand */
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
IEM_MC_END();
}
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
{
{
/* register operand */
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
IEM_MC_END();
}
else
{
/* memory operand */
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
IEM_MC_END();
}
return VINF_SUCCESS;
}
}
}
/** Opcode 0x6a. */
{
IEMOP_MNEMONIC("push Ib");
IEM_MC_BEGIN(0,0);
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
break;
case IEMMODE_32BIT:
break;
case IEMMODE_64BIT:
break;
}
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x6b. */
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
/* register operand */
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
IEM_MC_END();
}
else
{
/* memory operand */
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
IEM_MC_END();
}
return VINF_SUCCESS;
case IEMMODE_32BIT:
{
/* register operand */
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
IEM_MC_END();
}
else
{
/* memory operand */
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
IEM_MC_END();
}
return VINF_SUCCESS;
case IEMMODE_64BIT:
{
/* register operand */
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
IEM_MC_END();
}
else
{
/* memory operand */
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
IEM_MC_END();
}
return VINF_SUCCESS;
}
}
/** Opcode 0x6c. */
{
{
IEMOP_MNEMONIC("rep ins Yb,DX");
switch (pIemCpu->enmEffAddrMode)
{
}
}
else
{
IEMOP_MNEMONIC("ins Yb,DX");
switch (pIemCpu->enmEffAddrMode)
{
}
}
}
/** Opcode 0x6d. */
{
{
IEMOP_MNEMONIC("rep ins Yv,DX");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_64BIT:
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
}
}
else
{
IEMOP_MNEMONIC("ins Yv,DX");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_64BIT:
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
}
}
}
/** Opcode 0x6e. */
{
{
IEMOP_MNEMONIC("rep out DX,Yb");
switch (pIemCpu->enmEffAddrMode)
{
}
}
else
{
IEMOP_MNEMONIC("out DX,Yb");
switch (pIemCpu->enmEffAddrMode)
{
}
}
}
/** Opcode 0x6f. */
{
{
IEMOP_MNEMONIC("rep outs DX,Yv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg);
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg);
}
break;
case IEMMODE_64BIT:
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg);
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg);
}
break;
}
}
else
{
IEMOP_MNEMONIC("outs DX,Yv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_64BIT:
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
}
}
}
/** Opcode 0x70. */
{
IEMOP_MNEMONIC("jo Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x71. */
{
IEMOP_MNEMONIC("jno Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x72. */
{
IEMOP_MNEMONIC("jc/jnae Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x73. */
{
IEMOP_MNEMONIC("jnc/jnb Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x74. */
{
IEMOP_MNEMONIC("je/jz Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x75. */
{
IEMOP_MNEMONIC("jne/jnz Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x76. */
{
IEMOP_MNEMONIC("jbe/jna Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x77. */
{
IEMOP_MNEMONIC("jnbe/ja Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x78. */
{
IEMOP_MNEMONIC("js Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x79. */
{
IEMOP_MNEMONIC("jns Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x7a. */
{
IEMOP_MNEMONIC("jp Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x7b. */
{
IEMOP_MNEMONIC("jnp Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x7c. */
{
IEMOP_MNEMONIC("jl/jnge Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x7d. */
{
IEMOP_MNEMONIC("jnl/jge Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x7e. */
{
IEMOP_MNEMONIC("jle/jng Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x7f. */
{
IEMOP_MNEMONIC("jnle/jg Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x80. */
{
IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Eb,Ib");
{
/* register target */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory target */
if (pImpl->pfnLockedU8)
else
{ /* CMP */
}
else
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x81. */
{
IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Iz");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
{
/* register target */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory target */
if (pImpl->pfnLockedU16)
else
{ /* CMP, TEST */
}
else
IEM_MC_END();
}
break;
}
case IEMMODE_32BIT:
{
{
/* register target */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory target */
if (pImpl->pfnLockedU32)
else
{ /* CMP, TEST */
}
else
IEM_MC_END();
}
break;
}
case IEMMODE_64BIT:
{
{
/* register target */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory target */
if (pImpl->pfnLockedU64)
else
{ /* CMP */
}
else
IEM_MC_END();
}
break;
}
}
return VINF_SUCCESS;
}
/** Opcode 0x82. */
{
IEMOP_HLP_NO_64BIT(); /** @todo do we need to decode the whole instruction or is this ok? */
return FNIEMOP_CALL(iemOp_Grp1_Eb_Ib_80);
}
/** Opcode 0x83. */
{
IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Ib");
{
/*
* Register target
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
break;
}
case IEMMODE_32BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
break;
}
case IEMMODE_64BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
break;
}
}
}
else
{
/*
* Memory target.
*/
if (pImpl->pfnLockedU16)
else
{ /* CMP */
}
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
else
IEM_MC_END();
break;
}
case IEMMODE_32BIT:
{
else
IEM_MC_END();
break;
}
case IEMMODE_64BIT:
{
else
IEM_MC_END();
break;
}
}
}
return VINF_SUCCESS;
}
/** Opcode 0x84. */
{
IEMOP_MNEMONIC("test Eb,Gb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
}
/** Opcode 0x85. */
{
IEMOP_MNEMONIC("test Ev,Gv");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
}
/** Opcode 0x86. */
{
IEMOP_MNEMONIC("xchg Eb,Gb");
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
IEM_MC_BEGIN(0, 2);
IEM_MC_FETCH_GREG_U8(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
IEM_MC_END();
}
else
{
/*
* We're accessing memory.
*/
/** @todo the register must be committed separately! */
IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x87. */
{
IEMOP_MNEMONIC("xchg Ev,Gv");
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_FETCH_GREG_U16(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_FETCH_GREG_U32(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_FETCH_GREG_U64(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/*
* We're accessing memory.
*/
switch (pIemCpu->enmEffOpSize)
{
/** @todo the register must be committed separately! */
case IEMMODE_16BIT:
IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0x88. */
{
IEMOP_MNEMONIC("mov Eb,Gb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
IEM_MC_BEGIN(0, 1);
IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
}
else
{
/*
* We're writing a register to memory.
*/
IEM_MC_BEGIN(0, 2);
IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x89. */
{
IEMOP_MNEMONIC("mov Ev,Gv");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
}
}
else
{
/*
* We're writing a register to memory.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
}
}
return VINF_SUCCESS;
}
/** Opcode 0x8a. */
{
IEMOP_MNEMONIC("mov Gb,Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
IEM_MC_END();
}
else
{
/*
* We're loading a register from memory.
*/
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x8b. */
{
IEMOP_MNEMONIC("mov Gv,Ev");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
break;
}
}
else
{
/*
* We're loading a register from memory.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
break;
}
}
return VINF_SUCCESS;
}
/** Opcode 0x63. */
{
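/* Outside of 64-bit mode opcode 0x63 is ARPL; in 64-bit mode it is MOVSXD,
which degenerates into a plain 32-bit MOV when REX.W is clear (see the note
on the movsxd worker above). */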
return FNIEMOP_CALL(iemOp_arpl_Ew_Gw);
return FNIEMOP_CALL(iemOp_mov_Gv_Ev);
return FNIEMOP_CALL(iemOp_movsxd_Gv_Ev);
}
/** Opcode 0x8c. */
{
IEMOP_MNEMONIC("mov Ev,Sw");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* Check that the specified segment register exists. The REX.R prefix is ignored.
*/
if ( iSegReg > X86_SREG_GS)
return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
* In that case, the operand size is respected and the upper bits are
* cleared (starting with some pentium).
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
}
else
{
/*
* We're saving the register to memory. The access is word sized
* regardless of operand size prefixes.
*/
#if 0 /* not necessary */
#endif
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x8d. */
{
IEMOP_MNEMONIC("lea Gv,M");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
return IEMOP_RAISE_INVALID_OPCODE(); /* no register form */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Cast);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Cast);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0x8e. */
{
IEMOP_MNEMONIC("mov Sw,Ev");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* The practical operand size is 16-bit.
*/
#if 0 /* not necessary */
#endif
/*
* Check that the destination register exists and can be used with this
* instruction. The REX.R prefix is ignored.
*/
if ( iSegReg == X86_SREG_CS
|| iSegReg > X86_SREG_GS)
return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
IEM_MC_BEGIN(2, 0);
IEM_MC_END();
}
else
{
/*
* We're loading the register from memory. The access is word sized
* regardless of operand size prefixes.
*/
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x8f /0. */
{
/* This bugger is rather annoying as it requires rSP to be updated before
doing the effective address calculations. Will eventually require a
split between the R/M+SIB decoding and the effective address
calculation - which is something that is required for any attempt at
reusing this code for a recompiler. It may also be good to have if we
need to delay #UD exception caused by invalid lock prefixes.
For now, we'll do a mostly safe interpreter-only implementation here. */
/** @todo What's the deal with the 'reg' field and pop Ev? Ignoring it for
* now until tests show it's checked. */
IEMOP_MNEMONIC("pop Ev");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/* Register access is relatively easy and can share code. */
/*
* Memory target.
*
* Intel says that RSP is incremented before it's used in any effective
* address calculations. This means some serious extra annoyance here since
* we decode and calculate the effective address in one step and like to
* delay committing registers till everything is done.
*
* So, we'll decode and calculate the effective address twice. This will
* require some recoding if turned into a recompiler.
*/
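/* Example: 'pop qword [rsp]' must store to the address rSP holds *after* the
+8 adjustment, which is why the effective address is recalculated with the
updated rSP below. */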
IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* The common code does this differently. */
#ifndef TST_IEM_CHECK_MC
/* Calc effective address with modified ESP. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
switch (pIemCpu->enmEffOpSize)
{
}
/* Perform the operation - this should be CImpl. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
if (rcStrict == VINF_SUCCESS)
break;
}
case IEMMODE_32BIT:
{
if (rcStrict == VINF_SUCCESS)
break;
}
case IEMMODE_64BIT:
{
if (rcStrict == VINF_SUCCESS)
break;
}
}
if (rcStrict == VINF_SUCCESS)
{
}
return rcStrict;
#else
return VERR_IEM_IPE_2;
#endif
}
/** Opcode 0x8f. */
{
return IEMOP_RAISE_INVALID_OPCODE();
}
/**
* Common 'xchg reg,rAX' helper.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0x90. */
{
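/* 0x90 is plain NOP, except that with REX.B it becomes xchg r8,rAX and with
the F3 prefix it is PAUSE. */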
{
IEMOP_MNEMONIC("xchg r8,rAX");
}
IEMOP_MNEMONIC("pause");
else
IEMOP_MNEMONIC("nop");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x91. */
{
IEMOP_MNEMONIC("xchg rCX,rAX");
}
/** Opcode 0x92. */
{
IEMOP_MNEMONIC("xchg rDX,rAX");
}
/** Opcode 0x93. */
{
IEMOP_MNEMONIC("xchg rBX,rAX");
}
/** Opcode 0x94. */
{
IEMOP_MNEMONIC("xchg rSX,rAX");
}
/** Opcode 0x95. */
{
IEMOP_MNEMONIC("xchg rBP,rAX");
}
/** Opcode 0x96. */
{
IEMOP_MNEMONIC("xchg rSI,rAX");
}
/** Opcode 0x97. */
{
IEMOP_MNEMONIC("xchg rDI,rAX");
}
/** Opcode 0x98. */
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEMOP_MNEMONIC("cbw");
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEMOP_MNEMONIC("cwde");
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEMOP_MNEMONIC("cdqe");
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0x99. */
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEMOP_MNEMONIC("cwd");
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEMOP_MNEMONIC("cdq");
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEMOP_MNEMONIC("cqo");
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0x9a. */
{
IEMOP_MNEMONIC("call Ap");
/* Decode the far pointer address and pass it on to the far call C implementation. */
else
}
/** Opcode 0x9b. (aka fwait) */
{
IEMOP_MNEMONIC("wait");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x9c. */
{
}
/** Opcode 0x9d. */
{
}
/** Opcode 0x9e. */
{
IEMOP_MNEMONIC("sahf");
return IEMOP_RAISE_INVALID_OPCODE();
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x9f. */
{
IEMOP_MNEMONIC("lahf");
return IEMOP_RAISE_INVALID_OPCODE();
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
return VINF_SUCCESS;
}
/**
* Macro used by iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL and
* iemOp_mov_Ov_rAX to fetch the moffsXX part of the opcode and fend off lock
* prefixes. Will return on failures.
* @param a_GCPtrMemOff The variable to store the offset in.
*/
#define IEMOP_FETCH_MOFFS_XX(a_GCPtrMemOff) \
do \
{ \
switch (pIemCpu->enmEffAddrMode) \
{ \
case IEMMODE_16BIT: \
break; \
case IEMMODE_32BIT: \
break; \
case IEMMODE_64BIT: \
break; \
} \
} while (0)
/** Opcode 0xa0. */
{
/*
* Get the offset and fend off lock prefixes.
*/
/*
* Fetch AL.
*/
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xa1. */
{
/*
* Get the offset and fend off lock prefixes.
*/
IEMOP_MNEMONIC("mov rAX,Ov");
/*
* Fetch rAX.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0xa2. */
{
/*
* Get the offset and fend off lock prefixes.
*/
/*
* Store AL.
*/
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xa3. */
{
/*
* Get the offset and fend off lock prefixes.
*/
/*
* Store rAX.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Macro used by iemOp_movsb_Xb_Yb and iemOp_movswd_Xv_Yv */
IEM_MC_BEGIN(0, 2); \
} IEM_MC_ELSE() { \
} IEM_MC_ENDIF(); \
IEM_MC_ADVANCE_RIP(); \
IEM_MC_END();
/** Opcode 0xa4. */
{
/*
* Use the C implementation if a repeat prefix is encountered.
*/
{
IEMOP_MNEMONIC("rep movsb Xb,Yb");
switch (pIemCpu->enmEffAddrMode)
{
}
}
IEMOP_MNEMONIC("movsb Xb,Yb");
/*
* Sharing case implementation with movs[wdq] below.
*/
switch (pIemCpu->enmEffAddrMode)
{
}
return VINF_SUCCESS;
}
/** Opcode 0xa5. */
{
/*
* Use the C implementation if a repeat prefix is encountered.
*/
{
IEMOP_MNEMONIC("rep movs Xv,Yv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr16, pIemCpu->iEffSeg);
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr64, pIemCpu->iEffSeg);
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr16, pIemCpu->iEffSeg);
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr64, pIemCpu->iEffSeg);
}
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr64, pIemCpu->iEffSeg);
}
}
}
IEMOP_MNEMONIC("movs Xv,Yv");
/*
* Annoying double switch here.
* Using ugly macro for implementing the cases, sharing it with movsb.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
}
return VINF_SUCCESS;
}
/** Macro used by iemOp_cmpsb_Xb_Yb and iemOp_cmpswd_Xv_Yv */
\
\
} IEM_MC_ELSE() { \
} IEM_MC_ENDIF(); \
IEM_MC_ADVANCE_RIP(); \
IEM_MC_END(); \
/** Opcode 0xa6. */
{
/*
* Use the C implementation if a repeat prefix is encountered.
*/
{
IEMOP_MNEMONIC("repe cmps Xb,Yb");
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr16, pIemCpu->iEffSeg);
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr64, pIemCpu->iEffSeg);
}
}
{
IEMOP_MNEMONIC("repe cmps Xb,Yb");
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr16, pIemCpu->iEffSeg);
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr64, pIemCpu->iEffSeg);
}
}
IEMOP_MNEMONIC("cmps Xb,Yb");
/*
* Sharing case implementation with cmps[wdq] below.
*/
switch (pIemCpu->enmEffAddrMode)
{
}
return VINF_SUCCESS;
}
/** Opcode 0xa7. */
{
/*
* Use the C implementation if a repeat prefix is encountered.
*/
{
IEMOP_MNEMONIC("repe cmps Xv,Yv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr16, pIemCpu->iEffSeg);
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr64, pIemCpu->iEffSeg);
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr16, pIemCpu->iEffSeg);
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr64, pIemCpu->iEffSeg);
}
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr64, pIemCpu->iEffSeg);
}
}
}
{
IEMOP_MNEMONIC("repne cmps Xv,Yv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr16, pIemCpu->iEffSeg);
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr64, pIemCpu->iEffSeg);
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr16, pIemCpu->iEffSeg);
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr64, pIemCpu->iEffSeg);
}
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr64, pIemCpu->iEffSeg);
}
}
}
IEMOP_MNEMONIC("cmps Xv,Yv");
/*
* Annoying double switch here.
* Using ugly macro for implementing the cases, sharing it with cmpsb.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
}
return VINF_SUCCESS;
}
/** Opcode 0xa8. */
{
IEMOP_MNEMONIC("test al,Ib");
}
/** Opcode 0xa9. */
{
IEMOP_MNEMONIC("test rAX,Iz");
}
/** Macro used by iemOp_stosb_Yb_AL and iemOp_stoswd_Yv_eAX */
IEM_MC_BEGIN(0, 2); \
} IEM_MC_ELSE() { \
} IEM_MC_ENDIF(); \
IEM_MC_ADVANCE_RIP(); \
IEM_MC_END(); \
/** Opcode 0xaa. */
{
/*
* Use the C implementation if a repeat prefix is encountered.
*/
{
IEMOP_MNEMONIC("rep stos Yb,al");
switch (pIemCpu->enmEffAddrMode)
{
}
}
IEMOP_MNEMONIC("stos Yb,al");
/*
* Sharing case implementation with stos[wdq] below.
*/
switch (pIemCpu->enmEffAddrMode)
{
}
return VINF_SUCCESS;
}
/** Opcode 0xab. */
{
/*
* Use the C implementation if a repeat prefix is encountered.
*/
{
IEMOP_MNEMONIC("rep stos Yv,rAX");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
}
}
IEMOP_MNEMONIC("stos Yv,rAX");
/*
* Annoying double switch here.
* Using ugly macro for implementing the cases, sharing it with stosb.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
}
return VINF_SUCCESS;
}
/** Macro used by iemOp_lodsb_AL_Xb and iemOp_lodswd_eAX_Xv */
IEM_MC_BEGIN(0, 2); \
} IEM_MC_ELSE() { \
} IEM_MC_ENDIF(); \
IEM_MC_ADVANCE_RIP(); \
IEM_MC_END();
/** Opcode 0xac. */
{
/*
* Use the C implementation if a repeat prefix is encountered.
*/
{
IEMOP_MNEMONIC("rep lodsb al,Xb");
switch (pIemCpu->enmEffAddrMode)
{
}
}
IEMOP_MNEMONIC("lodsb al,Xb");
/*
* Sharing case implementation with lods[wdq] below.
*/
switch (pIemCpu->enmEffAddrMode)
{
}
return VINF_SUCCESS;
}
/** Opcode 0xad. */
{
/*
* Use the C implementation if a repeat prefix is encountered.
*/
{
IEMOP_MNEMONIC("rep lods rAX,Xv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
}
}
IEMOP_MNEMONIC("lods rAX,Xv");
/*
* Annoying double switch here.
* Using ugly macro for implementing the cases, sharing it with lodsb.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
}
return VINF_SUCCESS;
}
/** Macro used by iemOp_scasb_AL_Xb and iemOp_scaswd_eAX_Xv */
\
\
} IEM_MC_ELSE() { \
} IEM_MC_ENDIF(); \
IEM_MC_ADVANCE_RIP(); \
IEM_MC_END();
/** Opcode 0xae. */
{
/*
* Use the C implementation if a repeat prefix is encountered.
*/
{
IEMOP_MNEMONIC("repe scasb al,Xb");
switch (pIemCpu->enmEffAddrMode)
{
}
}
{
IEMOP_MNEMONIC("repne scasb al,Xb");
switch (pIemCpu->enmEffAddrMode)
{
}
}
IEMOP_MNEMONIC("scasb al,Xb");
/*
* Sharing case implementation with scas[wdq] below.
*/
switch (pIemCpu->enmEffAddrMode)
{
}
return VINF_SUCCESS;
}
/** Opcode 0xaf. */
{
/*
* Use the C implementation if a repeat prefix is encountered.
*/
{
IEMOP_MNEMONIC("repe scas rAX,Xv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT: AssertFailedReturn(VERR_INTERNAL_ERROR_3); /** @todo Is this wrong? We can do 32-bit addressing in 64-bit mode, but not 16-bit, right? */
}
}
}
{
IEMOP_MNEMONIC("repne scas rAX,Xv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
}
}
IEMOP_MNEMONIC("scas rAX,Xv");
/*
* Annoying double switch here.
* Using ugly macro for implementing the cases, sharing it with scasb.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
}
return VINF_SUCCESS;
}
/**
* Common 'mov r8, imm8' helper.
*/
{
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xb0. */
{
IEMOP_MNEMONIC("mov AL,Ib");
}
/** Opcode 0xb1. */
{
IEMOP_MNEMONIC("mov CL,Ib");
}
/** Opcode 0xb2. */
{
IEMOP_MNEMONIC("mov DL,Ib");
}
/** Opcode 0xb3. */
{
IEMOP_MNEMONIC("mov BL,Ib");
}
/** Opcode 0xb4. */
{
IEMOP_MNEMONIC("mov AH,Ib");
}
/** Opcode 0xb5. */
{
IEMOP_MNEMONIC("mov CH,Ib");
}
/** Opcode 0xb6. */
{
IEMOP_MNEMONIC("mov DH,Ib");
}
/** Opcode 0xb7. */
{
IEMOP_MNEMONIC("mov BH,Ib");
}
/**
* Common 'mov regX,immX' helper.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
case IEMMODE_32BIT:
{
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
case IEMMODE_64BIT:
{
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
}
return VINF_SUCCESS;
}
/** Opcode 0xb8. */
{
IEMOP_MNEMONIC("mov rAX,IV");
}
/** Opcode 0xb9. */
{
IEMOP_MNEMONIC("mov rCX,IV");
}
/** Opcode 0xba. */
{
IEMOP_MNEMONIC("mov rDX,IV");
}
/** Opcode 0xbb. */
{
IEMOP_MNEMONIC("mov rBX,IV");
}
/** Opcode 0xbc. */
{
IEMOP_MNEMONIC("mov rSP,IV");
}
/** Opcode 0xbd. */
{
IEMOP_MNEMONIC("mov rBP,IV");
}
/** Opcode 0xbe. */
{
IEMOP_MNEMONIC("mov rSI,IV");
}
/** Opcode 0xbf. */
{
IEMOP_MNEMONIC("mov rDI,IV");
}
/** Opcode 0xc0. */
{
{
case 6: return IEMOP_RAISE_INVALID_OPCODE();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
}
{
/* register */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0xc1. */
{
{
case 6: return IEMOP_RAISE_INVALID_OPCODE();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
}
{
/* register */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/* memory */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0xc2. */
{
IEMOP_MNEMONIC("retn Iw");
}
/** Opcode 0xc3. */
{
IEMOP_MNEMONIC("retn");
}
/** Opcode 0xc4. */
{
{
IEMOP_MNEMONIC("2-byte-vex");
/* The LES instruction is invalid 64-bit mode. In legacy and
compatability mode it is invalid with MOD=3.
The use as a VEX prefix is made possible by assigning the inverted
REX.R to the top MOD bit, and the top bit in the inverted register
specifier to the bottom MOD bit, thereby effectively limiting 32-bit
to accessing registers 0..7 in this VEX form. */
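/* Three-byte VEX layout: 0xc4, then a byte holding the inverted R/X/B bits
plus the map select (mmmmm), then a byte holding W, the inverted vvvv
register specifier, L and pp. */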
/** @todo VEX: Just use new tables for it. */
return IEMOP_RAISE_INVALID_OPCODE();
}
IEMOP_MNEMONIC("les Gv,Mp");
}
/** Opcode 0xc5. */
{
{
IEMOP_MNEMONIC("3-byte-vex");
/* The LDS instruction is invalid 64-bit mode. In legacy and
compatability mode it is invalid with MOD=3.
The use as a VEX prefix is made possible by assigning the inverted
REX.R and REX.X to the two MOD bits, since the REX bits are ignored
outside of 64-bit mode. */
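/* Two-byte VEX layout: 0xc5, then a single byte holding the inverted R bit,
the inverted vvvv register specifier, L and pp. */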
/** @todo VEX: Just use new tables for it. */
return IEMOP_RAISE_INVALID_OPCODE();
}
IEMOP_MNEMONIC("lds Gv,Mp");
}
/** Opcode 0xc6. */
{
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
return IEMOP_RAISE_INVALID_OPCODE();
IEMOP_MNEMONIC("mov Eb,Ib");
{
/* register access */
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
}
else
{
/* memory access. */
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0xc7. */
{
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
return IEMOP_RAISE_INVALID_OPCODE();
IEMOP_MNEMONIC("mov Ev,Iz");
{
/* register access */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/* memory access. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0xc8. */
{
IEMOP_MNEMONIC("enter Iw,Ib");
}
/** Opcode 0xc9. */
{
IEMOP_MNEMONIC("retn");
}
/** Opcode 0xca. */
{
IEMOP_MNEMONIC("retf Iw");
}
/** Opcode 0xcb. */
{
IEMOP_MNEMONIC("retf");
}
/** Opcode 0xcc. */
{
}
/** Opcode 0xcd. */
{
}
/** Opcode 0xce. */
{
IEM_MC_BEGIN(2, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xcf. */
{
IEMOP_MNEMONIC("iret");
}
/** Opcode 0xd0. */
{
{
case 6: return IEMOP_RAISE_INVALID_OPCODE();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
}
{
/* register */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0xd1. */
{
{
case 6: return IEMOP_RAISE_INVALID_OPCODE();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
}
{
/* register */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/* memory */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0xd2. */
{
{
case 6: return IEMOP_RAISE_INVALID_OPCODE();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc, grr. */
}
{
/* register */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0xd3. */
{
{
case 6: return IEMOP_RAISE_INVALID_OPCODE();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
}
{
/* register */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/* memory */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0xd4. */
{
IEMOP_MNEMONIC("aam Ib");
if (!bImm)
return IEMOP_RAISE_DIVIDE_ERROR();
}
/** Opcode 0xd5. */
{
IEMOP_MNEMONIC("aad Ib");
}
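/*
 * Editor's sketch (illustration only): the arithmetic behind AAM and AAD with
 * an arbitrary immediate base, matching the divide-error check above.  These
 * helpers are hypothetical stand-ins, not IEM APIs.
 */
static bool iemSketchAam(uint8_t *pbAl, uint8_t *pbAh, uint8_t bImm)
{
    if (!bImm)
        return false;                   /* #DE, as raised by the handler above. */
    *pbAh = *pbAl / bImm;               /* quotient to AH */
    *pbAl = *pbAl % bImm;               /* remainder to AL */
    return true;
}

static void iemSketchAad(uint8_t *pbAl, uint8_t *pbAh, uint8_t bImm)
{
    *pbAl = (uint8_t)(*pbAl + *pbAh * bImm);
    *pbAh = 0;
}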
/** Opcode 0xd7. */
{
IEMOP_MNEMONIC("xlat");
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(2, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(2, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(2, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
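/*
 * Editor's sketch (illustration only): XLAT loads AL from [BX/EBX/RBX + AL],
 * truncating the address to the effective address width the switch above
 * dispatches on.  The flat guest-memory pointer is a hypothetical stand-in.
 */
static uint8_t iemSketchXlat(uint64_t uRegBx, uint8_t bAl, IEMMODE enmEffAddrMode, uint8_t const *pbGuestMem)
{
    uint64_t uAddr = uRegBx + bAl;      /* AL is zero-extended */
    if (enmEffAddrMode == IEMMODE_16BIT)
        uAddr &= UINT16_MAX;
    else if (enmEffAddrMode == IEMMODE_32BIT)
        uAddr &= UINT32_MAX;
    return pbGuestMem[uAddr];
}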
/**
* Common worker for FPU instructions working on ST0 and STn, and storing the
* result in ST0.
*
* @param pfnAImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
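/*
 * Editor's sketch (illustration only) of the general shape of the ST0/STn
 * workers whose IEM_MC bodies are omitted above: fetch the two registers,
 * run the assembly worker, then either commit the result to ST0 or record a
 * stack underflow.  Every name below (SKETCHFPUREG, PFNSKETCHFPUBINOP, ...) is
 * a hypothetical stand-in, not an IEM type or API.
 */
typedef struct SKETCHFPUREG { uint64_t uMantissa; uint16_t uSignAndExponent; } SKETCHFPUREG;
typedef void (*PFNSKETCHFPUBINOP)(SKETCHFPUREG *pResult, SKETCHFPUREG const *pSt0, SKETCHFPUREG const *pStN);

static void iemSketchFpuSt0StN(PFNSKETCHFPUBINOP pfnAImpl, SKETCHFPUREG *paRegs, bool const *pafEmpty, uint8_t iStN)
{
    if (!pafEmpty[0] && !pafEmpty[iStN])
    {
        SKETCHFPUREG Result;
        pfnAImpl(&Result, &paRegs[0], &paRegs[iStN]);   /* e.g. the fadd/fmul/... worker */
        paRegs[0] = Result;                             /* store the result in ST0 */
    }
    /* else: record a stack underflow in FSW (and raise #MF if unmasked). */
}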
/**
* Common worker for FPU instructions working on ST0 and STn, and only affecting
* flags.
*
* @param pfnAImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/**
* Common worker for FPU instructions working on ST0 and STn, only affecting
* flags, and popping when done.
*
* @param pfnAImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd8 11/0. */
{
IEMOP_MNEMONIC("fadd st0,stN");
}
/** Opcode 0xd8 11/1. */
{
IEMOP_MNEMONIC("fmul st0,stN");
}
/** Opcode 0xd8 11/2. */
{
IEMOP_MNEMONIC("fcom st0,stN");
}
/** Opcode 0xd8 11/3. */
{
IEMOP_MNEMONIC("fcomp st0,stN");
}
/** Opcode 0xd8 11/4. */
{
IEMOP_MNEMONIC("fsub st0,stN");
}
/** Opcode 0xd8 11/5. */
{
IEMOP_MNEMONIC("fsubr st0,stN");
}
/** Opcode 0xd8 11/6. */
{
IEMOP_MNEMONIC("fdiv st0,stN");
}
/** Opcode 0xd8 11/7. */
{
IEMOP_MNEMONIC("fdivr st0,stN");
}
/**
* Common worker for FPU instructions working on ST0 and an m32r, and storing
* the result in ST0.
*
* @param pfnAImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd8 !11/0. */
{
IEMOP_MNEMONIC("fadd st0,m32r");
}
/** Opcode 0xd8 !11/1. */
{
IEMOP_MNEMONIC("fmul st0,m32r");
}
/** Opcode 0xd8 !11/2. */
{
IEMOP_MNEMONIC("fcom st0,m32r");
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd8 !11/3. */
{
IEMOP_MNEMONIC("fcomp st0,m32r");
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd8 !11/4. */
{
IEMOP_MNEMONIC("fsub st0,m32r");
}
/** Opcode 0xd8 !11/5. */
{
IEMOP_MNEMONIC("fsubr st0,m32r");
}
/** Opcode 0xd8 !11/6. */
{
IEMOP_MNEMONIC("fdiv st0,m32r");
}
/** Opcode 0xd8 !11/7. */
{
IEMOP_MNEMONIC("fdivr st0,m32r");
}
/** Opcode 0xd8. */
{
{
{
}
}
else
{
{
}
}
}
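/*
 * Editor's sketch (illustration only): how the escape-opcode dispatchers in
 * this file split the ModRM byte into MOD (bits 7:6), REG (bits 5:3) and RM
 * (bits 2:0), taking the register path when MOD == 3 and the memory path
 * otherwise.
 */
static void iemSketchSplitModRm(uint8_t bRm, uint8_t *pbMod, uint8_t *pbReg, uint8_t *pbRm)
{
    *pbMod = bRm >> 6;          /* 3 = register operand, 0..2 = memory operand */
    *pbReg = (bRm >> 3) & 7;    /* the /0../7 sub-opcode used in the comments above */
    *pbRm  = bRm & 7;           /* register number or addressing form */
}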
/** Opcode 0xd9 /0 mem32real
* @sa iemOp_fld_m64r */
{
IEMOP_MNEMONIC("fld m32r");
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 !11/2 mem32real */
{
IEMOP_MNEMONIC("fst m32r");
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 !11/3 */
{
IEMOP_MNEMONIC("fstp m32r");
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 !11/4 */
{
IEMOP_MNEMONIC("fldenv m14/28byte");
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 !11/5 */
{
IEMOP_MNEMONIC("fldcw m2byte");
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 !11/6 */
{
IEMOP_MNEMONIC("fstenv m14/m28byte");
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 !11/7 */
{
IEMOP_MNEMONIC("fnstcw m2byte");
IEM_MC_BEGIN(2, 0);
IEM_MC_ADVANCE_RIP(); /* C0-C3 are documented as undefined, we leave them unmodified. */
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 0xd0, 0xd9 0xd8-0xdf, ++?. */
{
IEMOP_MNEMONIC("fnop");
IEM_MC_BEGIN(0, 0);
/** @todo Testcase: looks like FNOP leaves FOP alone but updates FPUIP. Could be
* intel optimizations. Investigate. */
IEM_MC_ADVANCE_RIP(); /* C0-C3 are documented as undefined, we leave them unmodified. */
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 11/0 stN */
{
IEMOP_MNEMONIC("fld stN");
/** @todo Testcase: Check whether this raises \#MF. Intel does not mention it;
 * AMD indicates that it does. */
IEM_MC_BEGIN(0, 2);
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 11/3 stN */
{
IEMOP_MNEMONIC("fxch stN");
/** @todo Testcase: Check whether this raises \#MF. Intel does not mention it;
 * AMD indicates that it does. */
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 11/3, 0xdd 11/3. */
{
IEMOP_MNEMONIC("fstp st0,stN");
/* fstp st0, st0 is frequently used as an official 'ffreep st0' sequence. */
if (!iDstReg)
{
IEM_MC_BEGIN(0, 1);
IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 2);
IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/**
* Common worker for FPU instructions working on ST0 and replaces it with the
* result, i.e. unary operators.
*
* @param pfnAImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 0xe0. */
{
IEMOP_MNEMONIC("fchs st0");
}
/** Opcode 0xd9 0xe1. */
{
IEMOP_MNEMONIC("fabs st0");
}
/**
* Common worker for FPU instructions working on ST0 and only returns FSW.
*
* @param pfnAImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 0xe4. */
{
IEMOP_MNEMONIC("ftst st0");
}
/** Opcode 0xd9 0xe5. */
{
IEMOP_MNEMONIC("fxam st0");
}
/**
* Common worker for FPU instructions pushing a constant onto the FPU stack.
*
* @param pfnAImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 0xe8. */
{
IEMOP_MNEMONIC("fld1");
}
/** Opcode 0xd9 0xe9. */
{
IEMOP_MNEMONIC("fldl2t");
}
/** Opcode 0xd9 0xea. */
{
IEMOP_MNEMONIC("fldl2e");
}
/** Opcode 0xd9 0xeb. */
{
IEMOP_MNEMONIC("fldpi");
}
/** Opcode 0xd9 0xec. */
{
IEMOP_MNEMONIC("fldlg2");
}
/** Opcode 0xd9 0xed. */
{
IEMOP_MNEMONIC("fldln2");
}
/** Opcode 0xd9 0xee. */
{
IEMOP_MNEMONIC("fldz");
}
/** Opcode 0xd9 0xf0. */
{
IEMOP_MNEMONIC("f2xm1 st0");
}
/** Opcode 0xd9 0xf1. */
{
IEMOP_MNEMONIC("fylx2 st0");
}
/**
* Common worker for FPU instructions working on ST0 and having two outputs, one
* replacing ST0 and one pushed onto the stack.
*
* @param pfnAImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 0xf2. */
{
IEMOP_MNEMONIC("fptan st0");
}
/**
* Common worker for FPU instructions working on STn and ST0, storing the result
* in STn, and popping the stack unless IE, DE or ZE was raised.
*
* @param pfnAImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 0xf3. */
{
IEMOP_MNEMONIC("fpatan st1,st0");
}
/** Opcode 0xd9 0xf4. */
{
IEMOP_MNEMONIC("fxtract st0");
}
/** Opcode 0xd9 0xf5. */
{
IEMOP_MNEMONIC("fprem1 st0, st1");
}
/** Opcode 0xd9 0xf6. */
{
IEMOP_MNEMONIC("fdecstp");
/* Note! C0, C2 and C3 are documented as undefined, we clear them. */
/** @todo Testcase: Check whether FOP, FPUIP and FPUCS are affected by
* FINCSTP and FDECSTP. */
IEM_MC_BEGIN(0,0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 0xf7. */
{
IEMOP_MNEMONIC("fincstp");
/* Note! C0, C2 and C3 are documented as undefined, we clear them. */
/** @todo Testcase: Check whether FOP, FPUIP and FPUCS are affected by
* FINCSTP and FDECSTP. */
IEM_MC_BEGIN(0,0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xd9 0xf8. */
{
IEMOP_MNEMONIC("fprem st0, st1");
}
/** Opcode 0xd9 0xf9. */
{
IEMOP_MNEMONIC("fyl2xp1 st1,st0");
}
/** Opcode 0xd9 0xfa. */
{
IEMOP_MNEMONIC("fsqrt st0");
}
/** Opcode 0xd9 0xfb. */
{
IEMOP_MNEMONIC("fsincos st0");
}
/** Opcode 0xd9 0xfc. */
{
IEMOP_MNEMONIC("frndint st0");
}
/** Opcode 0xd9 0xfd. */
{
IEMOP_MNEMONIC("fscale st0, st1");
}
/** Opcode 0xd9 0xfe. */
{
IEMOP_MNEMONIC("fsin st0");
}
/** Opcode 0xd9 0xff. */
{
IEMOP_MNEMONIC("fcos st0");
}
/** Used by iemOp_EscF1. */
{
/* 0xe0 */ iemOp_fchs,
/* 0xe1 */ iemOp_fabs,
/* 0xe2 */ iemOp_Invalid,
/* 0xe3 */ iemOp_Invalid,
/* 0xe4 */ iemOp_ftst,
/* 0xe5 */ iemOp_fxam,
/* 0xe6 */ iemOp_Invalid,
/* 0xe7 */ iemOp_Invalid,
/* 0xe8 */ iemOp_fld1,
/* 0xe9 */ iemOp_fldl2t,
/* 0xea */ iemOp_fldl2e,
/* 0xeb */ iemOp_fldpi,
/* 0xec */ iemOp_fldlg2,
/* 0xed */ iemOp_fldln2,
/* 0xee */ iemOp_fldz,
/* 0xef */ iemOp_Invalid,
/* 0xf0 */ iemOp_f2xm1,
/* 0xf1 */ iemOp_fylx2,
/* 0xf2 */ iemOp_fptan,
/* 0xf3 */ iemOp_fpatan,
/* 0xf4 */ iemOp_fxtract,
/* 0xf5 */ iemOp_fprem1,
/* 0xf6 */ iemOp_fdecstp,
/* 0xf7 */ iemOp_fincstp,
/* 0xf8 */ iemOp_fprem,
/* 0xf9 */ iemOp_fyl2xp1,
/* 0xfa */ iemOp_fsqrt,
/* 0xfb */ iemOp_fsincos,
/* 0xfc */ iemOp_frndint,
/* 0xfd */ iemOp_fscale,
/* 0xfe */ iemOp_fsin,
/* 0xff */ iemOp_fcos
};
/** Opcode 0xd9. */
{
{
{
case 2:
if (bRm == 0xd0)
return FNIEMOP_CALL(iemOp_fnop);
return IEMOP_RAISE_INVALID_OPCODE();
case 3: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm); /* Reserved. Intel behavior seems to be FSTP ST(i) though. */
case 4:
case 5:
case 6:
case 7:
}
}
else
{
{
case 1: return IEMOP_RAISE_INVALID_OPCODE();
}
}
}
/** Opcode 0xda 11/0. */
{
IEMOP_MNEMONIC("fcmovb st0,stN");
IEM_MC_BEGIN(0, 1);
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xda 11/1. */
{
IEMOP_MNEMONIC("fcmove st0,stN");
IEM_MC_BEGIN(0, 1);
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xda 11/2. */
{
IEMOP_MNEMONIC("fcmovbe st0,stN");
IEM_MC_BEGIN(0, 1);
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xda 11/3. */
{
IEMOP_MNEMONIC("fcmovu st0,stN");
IEM_MC_BEGIN(0, 1);
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/**
* Common worker for FPU instructions working on ST0 and STn, only affecting
* flags, and popping twice when done.
*
* @param pfnAImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xda 0xe9. */
{
IEMOP_MNEMONIC("fucompp st0,stN");
}
/**
* Common worker for FPU instructions working on ST0 and an m32i, and storing
* the result in ST0.
*
* @param pfnAImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xda !11/0. */
{
IEMOP_MNEMONIC("fiadd m32i");
}
/** Opcode 0xda !11/1. */
{
IEMOP_MNEMONIC("fimul m32i");
}
/** Opcode 0xda !11/2. */
{
IEMOP_MNEMONIC("ficom st0,m32i");
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xda !11/3. */
{
IEMOP_MNEMONIC("ficomp st0,m32i");
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xda !11/4. */
{
IEMOP_MNEMONIC("fisub m32i");
}
/** Opcode 0xda !11/5. */
{
IEMOP_MNEMONIC("fisubr m32i");
}
/** Opcode 0xda !11/6. */
{
IEMOP_MNEMONIC("fidiv m32i");
}
/** Opcode 0xda !11/7. */
{
IEMOP_MNEMONIC("fidivr m32i");
}
/** Opcode 0xda. */
{
{
{
case 4: return IEMOP_RAISE_INVALID_OPCODE();
case 5:
if (bRm == 0xe9)
return FNIEMOP_CALL(iemOp_fucompp);
return IEMOP_RAISE_INVALID_OPCODE();
case 6: return IEMOP_RAISE_INVALID_OPCODE();
case 7: return IEMOP_RAISE_INVALID_OPCODE();
}
}
else
{
{
}
}
}
/** Opcode 0xdb !11/0. */
{
IEMOP_MNEMONIC("fild m32i");
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdb !11/1. */
{
IEMOP_MNEMONIC("fisttp m32i");
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdb !11/2. */
{
IEMOP_MNEMONIC("fist m32i");
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdb !11/3. */
{
IEMOP_MNEMONIC("fisttp m32i");
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdb !11/5. */
{
IEMOP_MNEMONIC("fld m80r");
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdb !11/7. */
{
IEMOP_MNEMONIC("fstp m80r");
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdb 11/0. */
{
IEMOP_MNEMONIC("fcmovnb st0,stN");
IEM_MC_BEGIN(0, 1);
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdb 11/1. */
{
IEMOP_MNEMONIC("fcmovne st0,stN");
IEM_MC_BEGIN(0, 1);
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdb 11/2. */
{
IEMOP_MNEMONIC("fcmovnbe st0,stN");
IEM_MC_BEGIN(0, 1);
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdb 11/3. */
{
IEMOP_MNEMONIC("fcmovnnu st0,stN");
IEM_MC_BEGIN(0, 1);
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdb 0xe0. */
{
IEMOP_MNEMONIC("fneni (8087/ign)");
IEM_MC_BEGIN(0,0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdb 0xe1. */
{
IEMOP_MNEMONIC("fndisi (8087/ign)");
IEM_MC_BEGIN(0,0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdb 0xe2. */
{
IEMOP_MNEMONIC("fnclex");
IEM_MC_BEGIN(0,0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdb 0xe3. */
{
IEMOP_MNEMONIC("fninit");
}
/** Opcode 0xdb 0xe4. */
{
IEM_MC_BEGIN(0,0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdb 0xe5. */
{
#if 0 /* #UDs on newer CPUs */
IEM_MC_BEGIN(0,0);
IEM_MC_END();
return VINF_SUCCESS;
#else
return IEMOP_RAISE_INVALID_OPCODE();
#endif
}
/** Opcode 0xdb 11/5. */
{
IEMOP_MNEMONIC("fucomi st0,stN");
return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fucomi_r80_by_r80, false /*fPop*/);
}
/** Opcode 0xdb 11/6. */
{
IEMOP_MNEMONIC("fcomi st0,stN");
return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fcomi_r80_by_r80, false /*fPop*/);
}
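/*
 * Editor's sketch (illustration only): the EFLAGS result of FCOMI/FUCOMI as
 * produced by the iemCImpl_fcomi_fucomi worker deferred to above.  Only ZF,
 * PF and CF carry the comparison result; OF, SF and AF are cleared.  The
 * enum and helper are hypothetical stand-ins.
 */
typedef enum SKETCHFPUCMP { kSketchFpuCmp_Greater, kSketchFpuCmp_Less, kSketchFpuCmp_Equal, kSketchFpuCmp_Unordered } SKETCHFPUCMP;

static uint32_t iemSketchFcomiEflags(SKETCHFPUCMP enmResult)
{
    switch (enmResult)
    {
        case kSketchFpuCmp_Greater: return 0;
        case kSketchFpuCmp_Less:    return X86_EFL_CF;
        case kSketchFpuCmp_Equal:   return X86_EFL_ZF;
        default:                    return X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF; /* unordered */
    }
}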
/** Opcode 0xdb. */
{
{
{
case 4:
switch (bRm)
{
case 0xe6: return IEMOP_RAISE_INVALID_OPCODE();
case 0xe7: return IEMOP_RAISE_INVALID_OPCODE();
}
break;
case 7: return IEMOP_RAISE_INVALID_OPCODE();
}
}
else
{
{
case 4: return IEMOP_RAISE_INVALID_OPCODE();
case 6: return IEMOP_RAISE_INVALID_OPCODE();
}
}
}
/**
* Common worker for FPU instructions working on STn and ST0, and storing the
* result in STn unless IE, DE or ZE was raised.
*
* @param pfnAImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdc 11/0. */
{
IEMOP_MNEMONIC("fadd stN,st0");
}
/** Opcode 0xdc 11/1. */
{
IEMOP_MNEMONIC("fmul stN,st0");
}
/** Opcode 0xdc 11/4. */
{
IEMOP_MNEMONIC("fsubr stN,st0");
}
/** Opcode 0xdc 11/5. */
{
IEMOP_MNEMONIC("fsub stN,st0");
}
/** Opcode 0xdc 11/6. */
{
IEMOP_MNEMONIC("fdivr stN,st0");
}
/** Opcode 0xdc 11/7. */
{
IEMOP_MNEMONIC("fdiv stN,st0");
}
/**
* Common worker for FPU instructions working on ST0 and a 64-bit floating point
* memory operand, and storing the result in ST0.
*
* @param pfnAImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdc !11/0. */
{
IEMOP_MNEMONIC("fadd m64r");
}
/** Opcode 0xdc !11/1. */
{
IEMOP_MNEMONIC("fmul m64r");
}
/** Opcode 0xdc !11/2. */
{
IEMOP_MNEMONIC("fcom st0,m64r");
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdc !11/3. */
{
IEMOP_MNEMONIC("fcomp st0,m64r");
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdc !11/4. */
{
IEMOP_MNEMONIC("fsub m64r");
}
/** Opcode 0xdc !11/5. */
{
IEMOP_MNEMONIC("fsubr m64r");
}
/** Opcode 0xdc !11/6. */
{
IEMOP_MNEMONIC("fdiv m64r");
}
/** Opcode 0xdc !11/7. */
{
IEMOP_MNEMONIC("fdivr m64r");
}
/** Opcode 0xdc. */
{
{
{
case 2: return FNIEMOP_CALL_1(iemOp_fcom_stN, bRm); /* Marked reserved, intel behavior is that of FCOM ST(i). */
case 3: return FNIEMOP_CALL_1(iemOp_fcomp_stN, bRm); /* Marked reserved, intel behavior is that of FCOMP ST(i). */
}
}
else
{
{
}
}
}
/** Opcode 0xdd !11/0.
* @sa iemOp_fld_m32r */
{
IEMOP_MNEMONIC("fld m64r");
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdd !11/1. */
{
IEMOP_MNEMONIC("fisttp m64i");
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdd !11/2. */
{
IEMOP_MNEMONIC("fst m64r");
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdd !11/3. */
{
IEMOP_MNEMONIC("fstp m64r");
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdd !11/4. */
{
IEMOP_MNEMONIC("fxrstor m94/108byte");
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdd !11/6. */
{
IEMOP_MNEMONIC("fnsave m94/108byte");
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdd !11/7. */
{
IEMOP_MNEMONIC("fnstsw m16");
IEM_MC_BEGIN(0, 2);
/** @todo Debug / drop a hint to the verifier that things may differ
* from REM. Seen 0x4020 (iem) vs 0x4000 (rem) at 0008:801c6b88 booting
* NT4SP1. (X86_FSW_PE) */
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdd 11/0. */
{
IEMOP_MNEMONIC("ffree stN");
/* Note! C0, C1, C2 and C3 are documented as undefined, we leave them
   unmodified. */
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdd 11/2. */
{
IEMOP_MNEMONIC("fst st0,stN");
IEM_MC_BEGIN(0, 2);
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdd 11/4. */
{
IEMOP_MNEMONIC("fucom st0,stN");
}
/** Opcode 0xdd 11/5. */
{
IEMOP_MNEMONIC("fucomp st0,stN");
}
/** Opcode 0xdd. */
{
{
{
case 1: return FNIEMOP_CALL_1(iemOp_fxch_stN, bRm); /* Reserved, intel behavior is that of XCHG ST(i). */
case 6: return IEMOP_RAISE_INVALID_OPCODE();
case 7: return IEMOP_RAISE_INVALID_OPCODE();
}
}
else
{
{
case 5: return IEMOP_RAISE_INVALID_OPCODE();
}
}
}
/** Opcode 0xde 11/0. */
{
IEMOP_MNEMONIC("faddp stN,st0");
}
/** Opcode 0xde 11/1. */
{
IEMOP_MNEMONIC("fmulp stN,st0");
}
/** Opcode 0xde 0xd9. */
{
IEMOP_MNEMONIC("fucompp st0,stN");
}
/** Opcode 0xde 11/4. */
{
IEMOP_MNEMONIC("fsubrp stN,st0");
}
/** Opcode 0xde 11/5. */
{
IEMOP_MNEMONIC("fsubp stN,st0");
}
/** Opcode 0xde 11/6. */
{
IEMOP_MNEMONIC("fdivrp stN,st0");
}
/** Opcode 0xde 11/7. */
{
IEMOP_MNEMONIC("fdivp stN,st0");
}
/**
* Common worker for FPU instructions working on ST0 and an m16i, and storing
* the result in ST0.
*
* @param pfnAImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xde !11/0. */
{
IEMOP_MNEMONIC("fiadd m16i");
}
/** Opcode 0xde !11/1. */
{
IEMOP_MNEMONIC("fimul m16i");
}
/** Opcode 0xde !11/2. */
{
IEMOP_MNEMONIC("ficom st0,m16i");
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xde !11/3. */
{
IEMOP_MNEMONIC("ficomp st0,m16i");
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xde !11/4. */
{
IEMOP_MNEMONIC("fisub m16i");
}
/** Opcode 0xde !11/5. */
{
IEMOP_MNEMONIC("fisubr m16i");
}
/** Opcode 0xde !11/6. */
{
IEMOP_MNEMONIC("fiadd m16i");
}
/** Opcode 0xde !11/7. */
{
IEMOP_MNEMONIC("fiadd m16i");
}
/** Opcode 0xde. */
{
{
{
return FNIEMOP_CALL(iemOp_fcompp);
return IEMOP_RAISE_INVALID_OPCODE();
}
}
else
{
{
}
}
}
/** Opcode 0xdf 11/0.
 * Undocumented instruction, assumed to work like ffree + fincstp. */
{
IEMOP_MNEMONIC("ffreep stN");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
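/*
 * Editor's sketch (illustration only): FFREEP modelled as 'ffree ST(i)'
 * followed by FINCSTP, matching the assumption stated above.  The tag array
 * and TOP handling are simplified stand-ins for the real FPU state.
 */
static void iemSketchFfreep(uint8_t *pauTags /* 8 entries, 3 = empty */, uint8_t *pbTop, uint8_t iStReg)
{
    pauTags[(*pbTop + iStReg) & 7] = 3;     /* ffree: mark ST(i) as empty */
    *pbTop = (uint8_t)((*pbTop + 1) & 7);   /* fincstp: pop by advancing TOP */
}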
/** Opcode 0xdf 0xe0. */
{
IEMOP_MNEMONIC("fnstsw ax");
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdf 11/5. */
{
IEMOP_MNEMONIC("fcomip st0,stN");
return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fcomi_r80_by_r80, true /*fPop*/);
}
/** Opcode 0xdf 11/6. */
{
IEMOP_MNEMONIC("fcomip st0,stN");
return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fcomi_r80_by_r80, true /*fPop*/);
}
/** Opcode 0xdf !11/0. */
/** Opcode 0xdf !11/1. */
{
IEMOP_MNEMONIC("fisttp m16i");
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdf !11/2. */
{
IEMOP_MNEMONIC("fistp m16i");
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdf !11/3. */
{
IEMOP_MNEMONIC("fistp m16i");
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdf !11/4. */
/** Opcode 0xdf !11/5. */
/** Opcode 0xdf !11/6. */
/** Opcode 0xdf !11/7. */
{
IEMOP_MNEMONIC("fistp m64i");
IEM_MC_ENDIF();
IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xdf. */
{
{
{
case 0: return FNIEMOP_CALL_1(iemOp_ffreep_stN, bRm); /* ffree + pop afterwards, since forever according to AMD. */
case 1: return FNIEMOP_CALL_1(iemOp_fxch_stN, bRm); /* Reserved, behaves like FXCH ST(i) on intel. */
case 2: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm); /* Reserved, behaves like FSTP ST(i) on intel. */
case 3: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm); /* Reserved, behaves like FSTP ST(i) on intel. */
return FNIEMOP_CALL(iemOp_fnstsw_ax);
return IEMOP_RAISE_INVALID_OPCODE();
case 7: return IEMOP_RAISE_INVALID_OPCODE();
}
}
else
{
{
}
}
}
/** Opcode 0xe0. */
{
IEMOP_MNEMONIC("loopne Jb");
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0xe1. */
{
IEMOP_MNEMONIC("loope Jb");
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0xe2. */
{
IEMOP_MNEMONIC("loop Jb");
/** @todo Check out the #GP case if EIP < CS.Base or EIP > CS.Limit when
* using the 32-bit operand size override. How can that be restarted? See
* weird pseudo code in intel manual. */
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0xe3. */
{
IEMOP_MNEMONIC("jecxz Jb");
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
}
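/*
 * Editor's sketch (illustration only): the branch conditions implemented by
 * the LOOPNE/LOOPE/LOOP/JECXZ handlers above.  The counter is CX/ECX/RCX as
 * selected by the effective address size (assumed already truncated by the
 * caller); only JECXZ leaves it untouched and none of them modify EFLAGS.
 */
static bool iemSketchLoopTaken(uint8_t bOpcode /* 0xe0..0xe3 */, uint64_t *puCounter, bool fZf)
{
    if (bOpcode == 0xe3)                    /* jcxz/jecxz/jrcxz: taken when the counter is zero */
        return *puCounter == 0;
    *puCounter -= 1;                        /* loop/loope/loopne decrement first */
    if (*puCounter == 0)
        return false;
    if (bOpcode == 0xe0)                    /* loopne: additionally requires ZF clear */
        return !fZf;
    if (bOpcode == 0xe1)                    /* loope: additionally requires ZF set */
        return fZf;
    return true;                            /* loop: counter non-zero is enough */
}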
/** Opcode 0xe4 */
{
IEMOP_MNEMONIC("in eAX,Ib");
}
/** Opcode 0xe5 */
{
IEMOP_MNEMONIC("in eAX,Ib");
}
/** Opcode 0xe6 */
{
IEMOP_MNEMONIC("out Ib,AL");
}
/** Opcode 0xe7 */
{
IEMOP_MNEMONIC("out Ib,eAX");
return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
/** Opcode 0xe8. */
{
IEMOP_MNEMONIC("call Jv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
}
case IEMMODE_32BIT:
{
}
case IEMMODE_64BIT:
{
}
}
}
/** Opcode 0xe9. */
{
IEMOP_MNEMONIC("jmp Jv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
case IEMMODE_32BIT:
{
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0xea. */
{
IEMOP_MNEMONIC("jmp Ap");
/* Decode the far pointer address and pass it on to the far call C implementation. */
else
}
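/*
 * Editor's sketch (illustration only): the 'Ap' far-pointer immediate decoded
 * above is laid out offset-first, selector-last, with the offset width given
 * by the effective operand size (ptr16:16 or ptr16:32).  The raw opcode-byte
 * pointer is a hypothetical stand-in for the opcode fetcher.
 */
static void iemSketchReadFarPtrImm(uint8_t const *pbOpcode, IEMMODE enmEffOpSize, uint16_t *puSel, uint32_t *puOff)
{
    if (enmEffOpSize == IEMMODE_16BIT)
    {
        *puOff = (uint32_t)pbOpcode[0] | ((uint32_t)pbOpcode[1] << 8);
        *puSel = (uint16_t)(pbOpcode[2] | ((uint16_t)pbOpcode[3] << 8));
    }
    else
    {
        *puOff = (uint32_t)pbOpcode[0] | ((uint32_t)pbOpcode[1] << 8)
               | ((uint32_t)pbOpcode[2] << 16) | ((uint32_t)pbOpcode[3] << 24);
        *puSel = (uint16_t)(pbOpcode[4] | ((uint16_t)pbOpcode[5] << 8));
    }
}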
/** Opcode 0xeb. */
{
IEMOP_MNEMONIC("jmp Jb");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xec */
{
IEMOP_MNEMONIC("in AL,DX");
}
/** Opcode 0xed */
{
IEMOP_MNEMONIC("in eAX,DX");
}
/** Opcode 0xee */
{
IEMOP_MNEMONIC("out DX,AL");
}
/** Opcode 0xef */
{
IEMOP_MNEMONIC("out DX,eAX");
return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
/** Opcode 0xf0. */
{
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0xf2. */
{
/* This overrides any previous REPE prefix. */
IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("repne");
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0xf3. */
{
/* This overrides any previous REPNE prefix. */
uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0xf4. */
{
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
}
/** Opcode 0xf5. */
{
IEMOP_MNEMONIC("cmc");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/**
*
* @param bRm The RM byte.
* @param pImpl The instruction implementation.
*/
{
{
/* register access */
IEM_MC_BEGIN(2, 0);
IEM_MC_END();
}
else
{
/* memory access. */
else
IEM_MC_END();
}
return VINF_SUCCESS;
}
/**
*
* @param bRm The RM byte.
* @param pImpl The instruction implementation.
*/
{
/* Registers are handled by a common worker. */
/* Memory we do here. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
else
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
else
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
else
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0xf6 /0. */
{
IEMOP_MNEMONIC("test Eb,Ib");
{
/* register access */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory access. */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0xf7 /0. */
{
IEMOP_MNEMONIC("test Ev,Iv");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
{
/* register access */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_32BIT:
{
IEM_MC_BEGIN(3, 0);
/* No clearing the high dword here - test doesn't write back the result. */
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
else
{
/* memory access. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_32BIT:
{
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
{
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
}
/** Opcode 0xf6 /4, /5, /6 and /7. */
{
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
{
/* register access */
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
/* memory access. */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0xf7 /4, /5, /6 and /7. */
{
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
{
/* register access */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_32BIT:
{
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
{
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
else
{
/* memory access. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_32BIT:
{
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
{
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
}
/** Opcode 0xf6. */
{
{
case 0:
case 1:
return IEMOP_RAISE_INVALID_OPCODE();
case 2:
IEMOP_MNEMONIC("not Eb");
case 3:
IEMOP_MNEMONIC("neg Eb");
case 4:
IEMOP_MNEMONIC("mul Eb");
case 5:
IEMOP_MNEMONIC("imul Eb");
case 6:
IEMOP_MNEMONIC("div Eb");
IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
case 7:
IEMOP_MNEMONIC("idiv Eb");
IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
}
}
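/*
 * Editor's sketch (illustration only): the core of 'div Eb' as dispatched
 * above - AX is divided by the 8-bit operand, the quotient goes to AL and the
 * remainder to AH, with #DE on a zero divisor or a quotient that does not fit
 * in 8 bits.  All arithmetic flags are left undefined, which is why the
 * dispatcher marks them as such for the verifier.
 */
static bool iemSketchDivEb(uint16_t *puAx, uint8_t bDivisor)
{
    if (!bDivisor)
        return false;                                       /* #DE: divide by zero */
    uint16_t const uQuotient  = *puAx / bDivisor;
    uint16_t const uRemainder = *puAx % bDivisor;
    if (uQuotient > 0xff)
        return false;                                       /* #DE: quotient overflow */
    *puAx = (uint16_t)((uRemainder << 8) | uQuotient);      /* AH = remainder, AL = quotient */
    return true;
}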
/** Opcode 0xf7. */
{
{
case 0:
case 1:
return IEMOP_RAISE_INVALID_OPCODE();
case 2:
IEMOP_MNEMONIC("not Ev");
case 3:
IEMOP_MNEMONIC("neg Ev");
case 4:
IEMOP_MNEMONIC("mul Ev");
case 5:
IEMOP_MNEMONIC("imul Ev");
case 6:
IEMOP_MNEMONIC("div Ev");
IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
case 7:
IEMOP_MNEMONIC("idiv Ev");
IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
}
}
/** Opcode 0xf8. */
{
IEMOP_MNEMONIC("clc");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xf9. */
{
IEMOP_MNEMONIC("stc");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xfa. */
{
IEMOP_MNEMONIC("cli");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
}
/** Opcode 0xfb. */
{
IEMOP_MNEMONIC("sti");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sti);
}
/** Opcode 0xfc. */
{
IEMOP_MNEMONIC("cld");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xfd. */
{
IEMOP_MNEMONIC("std");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xfe. */
{
{
case 0:
IEMOP_MNEMONIC("inc Ev");
case 1:
IEMOP_MNEMONIC("dec Ev");
default:
IEMOP_MNEMONIC("grp4-ud");
return IEMOP_RAISE_INVALID_OPCODE();
}
}
/**
* Opcode 0xff /2.
* @param bRm The RM byte.
*/
{
IEMOP_MNEMONIC("calln Ev");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
{
/* The new RIP is taken from a register. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(1, 0);
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(1, 0);
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(1, 0);
return VINF_SUCCESS;
}
}
else
{
/* The new RIP is taken from a memory location. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
return VINF_SUCCESS;
case IEMMODE_32BIT:
return VINF_SUCCESS;
case IEMMODE_64BIT:
return VINF_SUCCESS;
}
}
}
typedef IEM_CIMPL_DECL_TYPE_3(FNIEMCIMPLFARBRANCH, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmOpSize);
{
/* Registers? How?? */
return IEMOP_RAISE_INVALID_OPCODE(); /* callf eax is not legal */
/* Far pointer loaded from memory. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
/** @todo testcase: AMD does not seem to believe in this case (see bs-cpu-xcpt-1)
* and will apparently ignore REX.W, at least for the jmp far qword [rsp]
* and call far qword [rsp] encodings. */
if (!IEM_IS_GUEST_CPU_AMD(pIemCpu))
{
IEM_MC_END();
return VINF_SUCCESS;
}
/* AMD falls thru. */
case IEMMODE_32BIT:
IEM_MC_END();
return VINF_SUCCESS;
}
}
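/*
 * Editor's sketch (illustration only): the size of the m16:16/m16:32/m16:64
 * far-pointer operand loaded by the worker above - the offset comes first,
 * the 16-bit selector follows.  Per the @todo note, AMD CPUs appear to ignore
 * REX.W here, so the 64-bit case falls back to the 32-bit layout for them;
 * this helper only mirrors that choice and is otherwise hypothetical.
 */
static uint8_t iemSketchFarPtrMemSize(IEMMODE enmEffOpSize, bool fGuestCpuIsAmd)
{
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT:
            return 2 + 2;                   /* m16:16 */
        case IEMMODE_64BIT:
            if (!fGuestCpuIsAmd)
                return 8 + 2;               /* m16:64 */
            /* AMD falls thru. */
        default:
            return 4 + 2;                   /* m16:32 */
    }
}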
/**
* Opcode 0xff /3.
* @param bRm The RM byte.
*/
{
IEMOP_MNEMONIC("callf Ep");
}
/**
* Opcode 0xff /4.
* @param bRm The RM byte.
*/
{
IEMOP_MNEMONIC("jmpn Ev");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
{
/* The new RIP is taken from a register. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
return VINF_SUCCESS;
}
}
else
{
/* The new RIP is taken from a memory location. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 2);
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 2);
return VINF_SUCCESS;
}
}
}
/**
* Opcode 0xff /5.
* @param bRm The RM byte.
*/
{
IEMOP_MNEMONIC("jmpf Ep");
}
/**
* Opcode 0xff /6.
* @param bRm The RM byte.
*/
{
IEMOP_MNEMONIC("push Ev");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
/* Registers are handled by a common worker. */
/* Memory we do here. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0xff. */
{
{
case 0:
IEMOP_MNEMONIC("inc Ev");
case 1:
IEMOP_MNEMONIC("dec Ev");
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
IEMOP_MNEMONIC("grp5-ud");
return IEMOP_RAISE_INVALID_OPCODE();
}
}
{
};
/** @} */