/* IEMAllInstructions.cpp.h revision e41f0459369a6d814aa36bf4def225482fc56026 */
/* $Id$ */
/** @file
* IEM - Instruction Decoding and Emulation.
*/
/*
* Copyright (C) 2011 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/**
* Common worker for instructions like ADD, AND, OR, ++ with a byte
*
* @param pImpl Pointer to the instruction implementation (assembly).
*/
{
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
IEM_MC_BEGIN(3, 0);
IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
}
else
{
/*
* We're accessing memory.
* Note! We're putting the eflags on the stack here so we can commit them
* after the memory.
*/
uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
}
return VINF_SUCCESS;
}
/**
*
* @param pImpl Pointer to the instruction implementation (assembly).
*/
{
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
}
}
else
{
/*
* We're accessing memory.
* Note! We're putting the eflags on the stack here so we can commit them
* after the memory.
*/
uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */;
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
else
IEM_MC_END();
break;
}
}
return VINF_SUCCESS;
}
/**
* Common worker for byte instructions like ADD, AND, OR, ++ with a register as
* the destination.
*
* @param pImpl Pointer to the instruction implementation (assembly).
*/
{
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
IEM_MC_BEGIN(3, 0);
IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
}
else
{
/*
* We're accessing memory.
*/
IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
}
return VINF_SUCCESS;
}
/**
* register as the destination.
*
* @param pImpl Pointer to the instruction implementation (assembly).
*/
{
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
}
}
else
{
/*
* We're accessing memory.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
}
}
return VINF_SUCCESS;
}
/**
* Common worker for instructions like ADD, AND, OR, ++ with working on AL with
* a byte immediate.
*
* @param pImpl Pointer to the instruction implementation (assembly).
*/
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/**
* Common worker for instructions like ADD, AND, OR, ++ with working on
*
* @param pImpl Pointer to the instruction implementation (assembly).
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_32BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcodes 0xf1, 0xd6. */
{
IEMOP_MNEMONIC("Invalid");
return IEMOP_RAISE_INVALID_OPCODE();
}
/** @name ..... opcodes.
*
* @{
*/
/** @} */
/** @name Two byte opcodes (first byte 0x0f).
*
* @{
*/
/** Opcode 0x0f 0x00 /0. */
{
}
/** Opcode 0x0f 0x00 /1. */
{
}
/** Opcode 0x0f 0x00 /2. */
{
}
/** Opcode 0x0f 0x00 /3. */
{
}
/** Opcode 0x0f 0x00 /4. */
{
}
/** Opcode 0x0f 0x00 /5. */
{
}
/** Opcode 0x0f 0x00. */
{
{
case 6: return IEMOP_RAISE_INVALID_OPCODE();
case 7: return IEMOP_RAISE_INVALID_OPCODE();
}
}
/** Opcode 0x0f 0x01 /0. */
{
}
/** Opcode 0x0f 0x01 /0. */
{
AssertFailed();
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x01 /0. */
{
AssertFailed();
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x01 /0. */
{
AssertFailed();
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x01 /0. */
{
AssertFailed();
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x01 /1. */
{
}
/** Opcode 0x0f 0x01 /1. */
{
}
/** Opcode 0x0f 0x01 /1. */
{
}
/** Opcode 0x0f 0x01 /2. */
{
: pIemCpu->enmEffOpSize;
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x01 /2. */
{
AssertFailed();
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x01 /2. */
{
AssertFailed();
return IEMOP_RAISE_INVALID_OPCODE();
}
/** Opcode 0x0f 0x01 /3. */
{
: pIemCpu->enmEffOpSize;
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x01 /4. */
{
}
/** Opcode 0x0f 0x01 /6. */
{
}
/** Opcode 0x0f 0x01 /7. */
{
}
/** Opcode 0x0f 0x01 /7. */
{
}
/** Opcode 0x0f 0x01 /7. */
{
}
/** Opcode 0x0f 0x01. */
{
{
case 0:
switch (bRm & X86_MODRM_RM_MASK)
{
}
return IEMOP_RAISE_INVALID_OPCODE();
case 1:
switch (bRm & X86_MODRM_RM_MASK)
{
case 0: return FNIEMOP_CALL(iemOp_Grp7_monitor);
}
return IEMOP_RAISE_INVALID_OPCODE();
case 2:
switch (bRm & X86_MODRM_RM_MASK)
{
case 0: return FNIEMOP_CALL(iemOp_Grp7_xgetbv);
}
return IEMOP_RAISE_INVALID_OPCODE();
case 3:
return IEMOP_RAISE_INVALID_OPCODE();
case 4:
case 5:
return IEMOP_RAISE_INVALID_OPCODE();
case 6:
case 7:
switch (bRm & X86_MODRM_RM_MASK)
{
case 0: return FNIEMOP_CALL(iemOp_Grp7_swapgs);
}
return IEMOP_RAISE_INVALID_OPCODE();
}
}
/** Opcode 0x0f 0x02. */
/** Opcode 0x0f 0x03. */
/** Opcode 0x0f 0x04. */
/** Opcode 0x0f 0x05. */
/** Opcode 0x0f 0x06. */
/** Opcode 0x0f 0x08. */
/** Opcode 0x0f 0x09. */
/** Opcode 0x0f 0x0b. */
/** Opcode 0x0f 0x0d. */
/** Opcode 0x0f 0x0e. */
/** Opcode 0x0f 0x0f. */
/** Opcode 0x0f 0x10. */
/** Opcode 0x0f 0x11. */
/** Opcode 0x0f 0x12. */
/** Opcode 0x0f 0x13. */
/** Opcode 0x0f 0x14. */
/** Opcode 0x0f 0x15. */
/** Opcode 0x0f 0x16. */
/** Opcode 0x0f 0x17. */
/** Opcode 0x0f 0x18. */
/** Opcode 0x0f 0x20. */
{
/* mod is ignored, as is operand size overrides. */
IEMOP_MNEMONIC("mov Rd,Cd");
else
/** @todo Verify that the invalid lock sequence exception (\#UD) is raised
 * before the privilege level violation (\#GP). */
{
/* The lock prefix can be used to encode CR8 accesses on some CPUs. */
return IEMOP_RAISE_INVALID_LOCK_PREFIX();
iCrReg |= 8;
}
switch (iCrReg)
{
case 0: case 2: case 3: case 4: case 8:
break;
default:
return IEMOP_RAISE_INVALID_OPCODE();
}
return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Cd, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB, iCrReg);
}
/** Opcode 0x0f 0x21. */
/** Opcode 0x0f 0x22. */
{
/* mod is ignored, as is operand size overrides. */
IEMOP_MNEMONIC("mov Cd,Rd");
else
/** @todo Verify that the invalid lock sequence exception (\#UD) is raised
 * before the privilege level violation (\#GP). */
{
/* The lock prefix can be used to encode CR8 accesses on some CPUs. */
return IEMOP_RAISE_INVALID_LOCK_PREFIX();
iCrReg |= 8;
}
switch (iCrReg)
{
case 0: case 2: case 3: case 4: case 8:
break;
default:
return IEMOP_RAISE_INVALID_OPCODE();
}
return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Cd_Rd, iCrReg, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
/** Opcode 0x0f 0x23. */
/** Opcode 0x0f 0x24. */
/** Opcode 0x0f 0x26. */
/** Opcode 0x0f 0x28. */
/** Opcode 0x0f 0x29. */
/** Opcode 0x0f 0x2a. */
/** Opcode 0x0f 0x2b. */
/** Opcode 0x0f 0x2c. */
/** Opcode 0x0f 0x2d. */
/** Opcode 0x0f 0x2e. */
/** Opcode 0x0f 0x2f. */
/** Opcode 0x0f 0x30. */
/** Opcode 0x0f 0x31. */
/** Opcode 0x0f 0x33. */
/** Opcode 0x0f 0x34. */
/** Opcode 0x0f 0x34. */
/** Opcode 0x0f 0x35. */
/** Opcode 0x0f 0x37. */
/** Opcode 0x0f 0x38. */
/** Opcode 0x0f 0x39. */
/** Opcode 0x0f 0x3c (?). */
/** Opcode 0x0f 0x40. */
/** Opcode 0x0f 0x41. */
/** Opcode 0x0f 0x42. */
/** Opcode 0x0f 0x43. */
/** Opcode 0x0f 0x44. */
/** Opcode 0x0f 0x45. */
/** Opcode 0x0f 0x46. */
/** Opcode 0x0f 0x47. */
/** Opcode 0x0f 0x48. */
/** Opcode 0x0f 0x49. */
/** Opcode 0x0f 0x4a. */
/** Opcode 0x0f 0x4b. */
/** Opcode 0x0f 0x4c. */
/** Opcode 0x0f 0x4d. */
/** Opcode 0x0f 0x4e. */
/** Opcode 0x0f 0x4f. */
/** Opcode 0x0f 0x50. */
/** Opcode 0x0f 0x51. */
/** Opcode 0x0f 0x52. */
/** Opcode 0x0f 0x53. */
/** Opcode 0x0f 0x54. */
/** Opcode 0x0f 0x55. */
/** Opcode 0x0f 0x56. */
/** Opcode 0x0f 0x57. */
/** Opcode 0x0f 0x58. */
/** Opcode 0x0f 0x59. */
/** Opcode 0x0f 0x5a. */
/** Opcode 0x0f 0x5b. */
/** Opcode 0x0f 0x5c. */
/** Opcode 0x0f 0x5d. */
/** Opcode 0x0f 0x5e. */
/** Opcode 0x0f 0x5f. */
/** Opcode 0x0f 0x60. */
/** Opcode 0x0f 0x61. */
/** Opcode 0x0f 0x62. */
/** Opcode 0x0f 0x63. */
/** Opcode 0x0f 0x64. */
/** Opcode 0x0f 0x65. */
/** Opcode 0x0f 0x66. */
/** Opcode 0x0f 0x67. */
/** Opcode 0x0f 0x68. */
/** Opcode 0x0f 0x69. */
/** Opcode 0x0f 0x6a. */
/** Opcode 0x0f 0x6b. */
/** Opcode 0x0f 0x6c. */
/** Opcode 0x0f 0x6d. */
/** Opcode 0x0f 0x6e. */
/** Opcode 0x0f 0x6f. */
/** Opcode 0x0f 0x70. */
/** Opcode 0x0f 0x71. */
/** Opcode 0x0f 0x72. */
/** Opcode 0x0f 0x73. */
/** Opcode 0x0f 0x74. */
/** Opcode 0x0f 0x75. */
/** Opcode 0x0f 0x76. */
/** Opcode 0x0f 0x77. */
/** Opcode 0x0f 0x78. */
/** Opcode 0x0f 0x79. */
/** Opcode 0x0f 0x7c. */
/** Opcode 0x0f 0x7d. */
/** Opcode 0x0f 0x7e. */
/** Opcode 0x0f 0x7f. */
/** Opcode 0x0f 0x80. */
{
IEMOP_MNEMONIC("jo Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x81. */
{
IEMOP_MNEMONIC("jno Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x82. */
{
IEMOP_MNEMONIC("jc/jb/jnae Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x83. */
{
IEMOP_MNEMONIC("jnc/jnb/jae Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x84. */
{
IEMOP_MNEMONIC("je/jz Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x85. */
{
IEMOP_MNEMONIC("jne/jnz Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x86. */
{
IEMOP_MNEMONIC("jbe/jna Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x87. */
{
IEMOP_MNEMONIC("jnbe/ja Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x88. */
{
IEMOP_MNEMONIC("js Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x89. */
{
IEMOP_MNEMONIC("jns Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x8a. */
{
IEMOP_MNEMONIC("jp Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x8b. */
{
IEMOP_MNEMONIC("jo Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x8c. */
{
IEMOP_MNEMONIC("jl/jnge Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x8d. */
{
IEMOP_MNEMONIC("jnl/jge Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x8e. */
{
IEMOP_MNEMONIC("jle/jng Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x8f. */
{
IEMOP_MNEMONIC("jnle/jg Jv");
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
else
{
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x0f 0x90. */
/** Opcode 0x0f 0x91. */
/** Opcode 0x0f 0x92. */
/** Opcode 0x0f 0x93. */
/** Opcode 0x0f 0x94. */
/** Opcode 0x0f 0x95. */
/** Opcode 0x0f 0x96. */
/** Opcode 0x0f 0x97. */
/** Opcode 0x0f 0x98. */
/** Opcode 0x0f 0x99. */
/** Opcode 0x0f 0x9a. */
/** Opcode 0x0f 0x9b. */
/** Opcode 0x0f 0x9c. */
/** Opcode 0x0f 0x9d. */
/** Opcode 0x0f 0x9e. */
/** Opcode 0x0f 0x9f. */
/** Opcode 0x0f 0xa0. */
/** Opcode 0x0f 0xa1. */
/** Opcode 0x0f 0xa2. */
/** Opcode 0x0f 0xa3. */
/** Opcode 0x0f 0xa4. */
/** Opcode 0x0f 0xa7. */
/** Opcode 0x0f 0xa8. */
/** Opcode 0x0f 0xa9. */
/** Opcode 0x0f 0xaa. */
/** Opcode 0x0f 0xab. */
/** Opcode 0x0f 0xac. */
/** Opcode 0x0f 0xad. */
/** Opcode 0x0f 0xae. */
/** Opcode 0x0f 0xaf. */
/** Opcode 0x0f 0xb0. */
/** Opcode 0x0f 0xb1. */
{
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/* The source cannot be a register. */
return IEMOP_RAISE_INVALID_LOCK_PREFIX();
uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & bRm & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0x0f 0xb2. */
{
IEMOP_MNEMONIC("lss Gv,Mp");
}
/** Opcode 0x0f 0xb3. */
/** Opcode 0x0f 0xb4. */
{
IEMOP_MNEMONIC("lfs Gv,Mp");
}
/** Opcode 0x0f 0xb5. */
{
IEMOP_MNEMONIC("lgs Gv,Mp");
}
/** Opcode 0x0f 0xb6. */
{
IEMOP_MNEMONIC("movzx Gv,Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/*
* We're loading a register from memory.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0x0f 0xb7. */
/** Opcode 0x0f 0xb8. */
/** Opcode 0x0f 0xb9. */
/** Opcode 0x0f 0xba. */
/** Opcode 0x0f 0xbb. */
/** Opcode 0x0f 0xbc. */
/** Opcode 0x0f 0xbd. */
/** Opcode 0x0f 0xbe. */
/** Opcode 0x0f 0xbf. */
/** Opcode 0x0f 0xc0. */
/** Opcode 0x0f 0xc1. */
/** Opcode 0x0f 0xc2. */
/** Opcode 0x0f 0xc3. */
/** Opcode 0x0f 0xc4. */
/** Opcode 0x0f 0xc5. */
/** Opcode 0x0f 0xc6. */
/** Opcode 0x0f 0xc7. */
/** Opcode 0x0f 0xc8. */
/** Opcode 0x0f 0xc9. */
/** Opcode 0x0f 0xca. */
/** Opcode 0x0f 0xcb. */
/** Opcode 0x0f 0xcc. */
/** Opcode 0x0f 0xcd. */
/** Opcode 0x0f 0xce. */
/** Opcode 0x0f 0xcf. */
/** Opcode 0x0f 0xd0. */
/** Opcode 0x0f 0xd1. */
/** Opcode 0x0f 0xd2. */
/** Opcode 0x0f 0xd3. */
/** Opcode 0x0f 0xd4. */
/** Opcode 0x0f 0xd5. */
/** Opcode 0x0f 0xd6. */
/** Opcode 0x0f 0xd7. */
/** Opcode 0x0f 0xd8. */
/** Opcode 0x0f 0xd9. */
/** Opcode 0x0f 0xda. */
/** Opcode 0x0f 0xdb. */
/** Opcode 0x0f 0xdc. */
/** Opcode 0x0f 0xdd. */
/** Opcode 0x0f 0xde. */
/** Opcode 0x0f 0xdf. */
/** Opcode 0x0f 0xe0. */
/** Opcode 0x0f 0xe1. */
/** Opcode 0x0f 0xe2. */
/** Opcode 0x0f 0xe3. */
/** Opcode 0x0f 0xe4. */
/** Opcode 0x0f 0xe5. */
/** Opcode 0x0f 0xe6. */
/** Opcode 0x0f 0xe7. */
/** Opcode 0x0f 0xe8. */
/** Opcode 0x0f 0xe9. */
/** Opcode 0x0f 0xea. */
/** Opcode 0x0f 0xeb. */
/** Opcode 0x0f 0xec. */
/** Opcode 0x0f 0xed. */
/** Opcode 0x0f 0xee. */
/** Opcode 0x0f 0xef. */
/** Opcode 0x0f 0xf0. */
/** Opcode 0x0f 0xf1. */
/** Opcode 0x0f 0xf2. */
/** Opcode 0x0f 0xf3. */
/** Opcode 0x0f 0xf4. */
/** Opcode 0x0f 0xf5. */
/** Opcode 0x0f 0xf6. */
/** Opcode 0x0f 0xf7. */
/** Opcode 0x0f 0xf8. */
/** Opcode 0x0f 0xf9. */
/** Opcode 0x0f 0xfa. */
/** Opcode 0x0f 0xfb. */
/** Opcode 0x0f 0xfc. */
/** Opcode 0x0f 0xfd. */
/** Opcode 0x0f 0xfe. */
{
/* 0x13 */ iemOp_movlps_Mq_Vq__movlpd_Mq_Vq,
/* 0x14 */ iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq,
/* 0x15 */ iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq,
/* 0x17 */ iemOp_movhps_Mq_Vq__movhpd_Mq_Vq,
/* 0x28 */ iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd,
/* 0x29 */ iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd,
/* 0x2b */ iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd,
/* 0x2e */ iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd,
/* 0x2f */ iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd,
/* 0x50 */ iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd,
/* 0x52 */ iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss,
/* 0x53 */ iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss,
/* 0x54 */ iemOp_andps_Vps_Wps__andpd_Wpd_Vpd,
/* 0x55 */ iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd,
/* 0x56 */ iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd,
/* 0x57 */ iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd,
/* 0x60 */ iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq,
/* 0x61 */ iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq,
/* 0x62 */ iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq,
/* 0x63 */ iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq,
/* 0x64 */ iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq,
/* 0x65 */ iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq,
/* 0x66 */ iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq,
/* 0x67 */ iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq,
/* 0x68 */ iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq,
/* 0x69 */ iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq,
/* 0x6a */ iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq,
/* 0x6b */ iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq,
/* 0x6c */ iemOp_punpcklqdq_Vdq_Wdq,
/* 0x6d */ iemOp_punpckhqdq_Vdq_Wdq,
/* 0x6e */ iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey,
/* 0x6f */ iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq,
/* 0x71 */ iemOp_Grp12,
/* 0x72 */ iemOp_Grp13,
/* 0x73 */ iemOp_Grp14,
/* 0x74 */ iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq,
/* 0x75 */ iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq,
/* 0x76 */ iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq,
/* 0x77 */ iemOp_emms,
/* 0x7c */ iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps,
/* 0x7d */ iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps,
/* 0x7e */ iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq,
/* 0xc0 */ iemOp_xadd_Eb_Gb,
/* 0xc1 */ iemOp_xadd_Ev_Gv,
/* 0xc3 */ iemOp_movnti_My_Gy,
/* 0xc4 */ iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib,
/* 0xc5 */ iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib,
/* 0xc6 */ iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib,
/* 0xc7 */ iemOp_Grp9,
/* 0xd0 */ iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps,
/* 0xd1 */ iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq,
/* 0xd2 */ iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq,
/* 0xd3 */ iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq,
/* 0xd4 */ iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq,
/* 0xd5 */ iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq,
/* 0xd6 */ iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq,
/* 0xd7 */ iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq,
/* 0xd8 */ iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq,
/* 0xd9 */ iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq,
/* 0xda */ iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq,
/* 0xdb */ iemOp_pand_Pq_Qq__pand_Vdq_Wdq,
/* 0xdc */ iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq,
/* 0xdd */ iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq,
/* 0xde */ iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq,
/* 0xdf */ iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq,
/* 0xe0 */ iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq,
/* 0xe1 */ iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq,
/* 0xe2 */ iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq,
/* 0xe3 */ iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq,
/* 0xe4 */ iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq,
/* 0xe5 */ iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq,
/* 0xe7 */ iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq,
/* 0xe8 */ iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq,
/* 0xe9 */ iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq,
/* 0xea */ iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq,
/* 0xeb */ iemOp_por_Pq_Qq__por_Vdq_Wdq,
/* 0xec */ iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq,
/* 0xed */ iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq,
/* 0xee */ iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq,
/* 0xef */ iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq,
/* 0xf0 */ iemOp_lddqu_Vdq_Mdq,
/* 0xf1 */ iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq,
/* 0xf2 */ iemOp_psld_Pq_Qq__pslld_Vdq_Wdq,
/* 0xf3 */ iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq,
/* 0xf4 */ iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq,
/* 0xf5 */ iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq,
/* 0xf6 */ iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq,
/* 0xf7 */ iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq,
/* 0xf8 */ iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq,
/* 0xf9 */ iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq,
/* 0xfa */ iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq,
/* 0xfb */ iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq,
/* 0xfc */ iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq,
/* 0xfd */ iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq,
/* 0xfe */ iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq,
/* 0xff */ iemOp_Invalid
};
/** @} */
/** @name One byte opcodes.
*
* @{
*/
/** Opcode 0x00. */
{
IEMOP_MNEMONIC("add Eb,Gb");
}
/** Opcode 0x01. */
{
IEMOP_MNEMONIC("add Gv,Ev");
}
/** Opcode 0x02. */
{
IEMOP_MNEMONIC("add Gb,Eb");
}
/** Opcode 0x03. */
{
IEMOP_MNEMONIC("add Gv,Ev");
}
/** Opcode 0x04. */
{
IEMOP_MNEMONIC("add al,Ib");
}
/** Opcode 0x05. */
{
IEMOP_MNEMONIC("add rAX,Iz");
}
/**
* Common 'push segment-register' helper.
*/
{
if (iReg < X86_SREG_FS)
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
return VINF_SUCCESS;
}
/** Opcode 0x06. */
{
IEMOP_MNEMONIC("push es");
}
/** Opcode 0x07. */
{
IEMOP_MNEMONIC("pop es");
}
/** Opcode 0x08. */
{
IEMOP_MNEMONIC("or Eb,Gb");
}
/** Opcode 0x09. */
{
IEMOP_MNEMONIC("or Ev,Gv ");
}
/** Opcode 0x0a. */
{
IEMOP_MNEMONIC("or Gb,Eb");
}
/** Opcode 0x0b. */
{
IEMOP_MNEMONIC("or Gv,Ev");
}
/** Opcode 0x0c. */
{
IEMOP_MNEMONIC("or al,Ib");
}
/** Opcode 0x0d. */
{
IEMOP_MNEMONIC("or rAX,Iz");
}
/** Opcode 0x0e. */
{
IEMOP_MNEMONIC("push cs");
}
/** Opcode 0x0f. */
{
return FNIEMOP_CALL(g_apfnTwoByteMap[b]);
}
/** Opcode 0x10. */
{
IEMOP_MNEMONIC("adc Eb,Gb");
}
/** Opcode 0x11. */
{
IEMOP_MNEMONIC("adc Ev,Gv");
}
/** Opcode 0x12. */
{
IEMOP_MNEMONIC("adc Gb,Eb");
}
/** Opcode 0x13. */
{
IEMOP_MNEMONIC("adc Gv,Ev");
}
/** Opcode 0x14. */
{
IEMOP_MNEMONIC("adc al,Ib");
}
/** Opcode 0x15. */
{
IEMOP_MNEMONIC("adc rAX,Iz");
}
/** Opcode 0x16. */
{
IEMOP_MNEMONIC("push ss");
}
/** Opcode 0x17. */
{
}
/** Opcode 0x18. */
{
IEMOP_MNEMONIC("sbb Eb,Gb");
}
/** Opcode 0x19. */
{
IEMOP_MNEMONIC("sbb Ev,Gv");
}
/** Opcode 0x1a. */
{
IEMOP_MNEMONIC("sbb Gb,Eb");
}
/** Opcode 0x1b. */
{
IEMOP_MNEMONIC("sbb Gv,Ev");
}
/** Opcode 0x1c. */
{
IEMOP_MNEMONIC("sbb al,Ib");
}
/** Opcode 0x1d. */
{
IEMOP_MNEMONIC("sbb rAX,Iz");
}
/** Opcode 0x1e. */
{
IEMOP_MNEMONIC("push ds");
}
/** Opcode 0x1f. */
{
IEMOP_MNEMONIC("pop ds");
}
/** Opcode 0x20. */
{
IEMOP_MNEMONIC("and Eb,Gb");
}
/** Opcode 0x21. */
{
IEMOP_MNEMONIC("and Ev,Gv");
}
/** Opcode 0x22. */
{
IEMOP_MNEMONIC("and Gb,Eb");
}
/** Opcode 0x23. */
{
IEMOP_MNEMONIC("and Gv,Ev");
}
/** Opcode 0x24. */
{
IEMOP_MNEMONIC("and al,Ib");
}
/** Opcode 0x25. */
{
IEMOP_MNEMONIC("and rAX,Iz");
}
/** Opcode 0x26. */
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x27. */
/** Opcode 0x28. */
{
IEMOP_MNEMONIC("sub Eb,Gb");
}
/** Opcode 0x29. */
{
IEMOP_MNEMONIC("sub Ev,Gv");
}
/** Opcode 0x2a. */
{
IEMOP_MNEMONIC("sub Gb,Eb");
}
/** Opcode 0x2b. */
{
IEMOP_MNEMONIC("sub Gv,Ev");
}
/** Opcode 0x2c. */
{
IEMOP_MNEMONIC("sub al,Ib");
}
/** Opcode 0x2d. */
{
IEMOP_MNEMONIC("sub rAX,Iz");
}
/** Opcode 0x2e. */
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x2f. */
/** Opcode 0x30. */
{
IEMOP_MNEMONIC("xor Eb,Gb");
}
/** Opcode 0x31. */
{
IEMOP_MNEMONIC("xor Ev,Gv");
}
/** Opcode 0x32. */
{
IEMOP_MNEMONIC("xor Gb,Eb");
}
/** Opcode 0x33. */
{
IEMOP_MNEMONIC("xor Gv,Ev");
}
/** Opcode 0x34. */
{
IEMOP_MNEMONIC("xor al,Ib");
}
/** Opcode 0x35. */
{
IEMOP_MNEMONIC("xor rAX,Iz");
}
/** Opcode 0x36. */
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x37. */
/** Opcode 0x38. */
{
IEMOP_MNEMONIC("cmp Eb,Gb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
}
/** Opcode 0x39. */
{
IEMOP_MNEMONIC("cmp Ev,Gv");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
}
/** Opcode 0x3a. */
{
IEMOP_MNEMONIC("cmp Gb,Eb");
}
/** Opcode 0x3b. */
{
IEMOP_MNEMONIC("cmp Gv,Ev");
}
/** Opcode 0x3c. */
{
IEMOP_MNEMONIC("cmp al,Ib");
}
/** Opcode 0x3d. */
{
IEMOP_MNEMONIC("cmp rAX,Iz");
}
/** Opcode 0x3e. */
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x3f. */
/**
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(2, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(2, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(2, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
return VINF_SUCCESS;
}
/** Opcode 0x40. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eAX");
}
/** Opcode 0x41. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eCX");
}
/** Opcode 0x42. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eDX");
}
/** Opcode 0x43. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eBX");
}
/** Opcode 0x44. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eSP");
}
/** Opcode 0x45. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eBP");
}
/** Opcode 0x46. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eSI");
}
/** Opcode 0x47. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("inc eDI");
}
/** Opcode 0x48. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eAX");
}
/** Opcode 0x49. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eCX");
}
/** Opcode 0x4a. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eDX");
}
/** Opcode 0x4b. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eBX");
}
/** Opcode 0x4c. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eSP");
}
/** Opcode 0x4d. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eBP");
}
/** Opcode 0x4e. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eSI");
}
/** Opcode 0x4f. */
{
/*
* This is a REX prefix in 64-bit mode.
*/
{
pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
IEMOP_MNEMONIC("dec eDI");
}
/**
* Common 'push register' helper.
*/
{
{
}
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
return VINF_SUCCESS;
}
/** Opcode 0x50. */
{
IEMOP_MNEMONIC("push rAX");
}
/** Opcode 0x51. */
{
IEMOP_MNEMONIC("push rCX");
}
/** Opcode 0x52. */
{
IEMOP_MNEMONIC("push rDX");
}
/** Opcode 0x53. */
{
IEMOP_MNEMONIC("push rBX");
}
/** Opcode 0x54. */
{
IEMOP_MNEMONIC("push rSP");
}
/** Opcode 0x55. */
{
IEMOP_MNEMONIC("push rBP");
}
/** Opcode 0x56. */
{
IEMOP_MNEMONIC("push rSI");
}
/** Opcode 0x57. */
{
IEMOP_MNEMONIC("push rDI");
}
/**
* Common 'pop register' helper.
*/
{
{
}
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
return VINF_SUCCESS;
}
/** Opcode 0x58. */
{
IEMOP_MNEMONIC("pop rAX");
}
/** Opcode 0x59. */
{
IEMOP_MNEMONIC("pop rCX");
}
/** Opcode 0x5a. */
{
IEMOP_MNEMONIC("pop rDX");
}
/** Opcode 0x5b. */
{
IEMOP_MNEMONIC("pop rBX");
}
/** Opcode 0x5c. */
{
IEMOP_MNEMONIC("pop rSP");
}
/** Opcode 0x5d. */
{
IEMOP_MNEMONIC("pop rBP");
}
/** Opcode 0x5e. */
{
IEMOP_MNEMONIC("pop rSI");
}
/** Opcode 0x5f. */
{
IEMOP_MNEMONIC("pop rDI");
}
/** Opcode 0x60. */
{
IEMOP_MNEMONIC("pusha");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_16);
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_32);
}
/** Opcode 0x61. */
{
IEMOP_MNEMONIC("popa");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_16);
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_32);
}
/** Opcode 0x62. */
/** Opcode 0x63. */
/** Opcode 0x64. */
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x65. */
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x66. */
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x67. */
{
switch (pIemCpu->enmDefAddrMode)
{
default: AssertFailed();
}
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0x68. */
{
IEMOP_MNEMONIC("push Iz");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
IEM_MC_BEGIN(0,0);
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_32BIT:
{
IEM_MC_BEGIN(0,0);
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
{
IEM_MC_BEGIN(0,0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0x69. */
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
{
/* register operand */
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
IEM_MC_END();
}
else
{
/* memory operand */
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
IEM_MC_END();
}
return VINF_SUCCESS;
}
case IEMMODE_32BIT:
{
{
/* register operand */
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
IEM_MC_END();
}
else
{
/* memory operand */
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
IEM_MC_END();
}
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
{
{
/* register operand */
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
IEM_MC_END();
}
else
{
/* memory operand */
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
IEM_MC_END();
}
return VINF_SUCCESS;
}
}
}
/** Opcode 0x6a. */
{
IEMOP_MNEMONIC("push Ib");
IEM_MC_BEGIN(0,0);
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
break;
case IEMMODE_32BIT:
break;
case IEMMODE_64BIT:
break;
}
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x6b. */
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
/* register operand */
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
IEM_MC_END();
}
else
{
/* memory operand */
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
IEM_MC_END();
}
return VINF_SUCCESS;
case IEMMODE_32BIT:
{
/* register operand */
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
IEM_MC_END();
}
else
{
/* memory operand */
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
IEM_MC_END();
}
return VINF_SUCCESS;
case IEMMODE_64BIT:
{
/* register operand */
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
IEM_MC_END();
}
else
{
/* memory operand */
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
IEM_MC_END();
}
return VINF_SUCCESS;
}
}
/** Opcode 0x6c. */
{
{
IEMOP_MNEMONIC("rep ins Yb,DX");
switch (pIemCpu->enmEffAddrMode)
{
}
}
else
{
IEMOP_MNEMONIC("ins Yb,DX");
switch (pIemCpu->enmEffAddrMode)
{
}
}
}
/** Opcode 0x6d. */
{
{
IEMOP_MNEMONIC("rep ins Yv,DX");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_64BIT:
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
}
}
else
{
IEMOP_MNEMONIC("ins Yv,DX");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_64BIT:
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
}
}
}
/** Opcode 0x6e. */
{
{
IEMOP_MNEMONIC("rep out DX,Yb");
switch (pIemCpu->enmEffAddrMode)
{
}
}
else
{
IEMOP_MNEMONIC("out DX,Yb");
switch (pIemCpu->enmEffAddrMode)
{
}
}
}
/** Opcode 0x6f. */
{
{
IEMOP_MNEMONIC("rep outs DX,Yv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg);
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg);
}
break;
case IEMMODE_64BIT:
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg);
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg);
}
break;
}
}
else
{
IEMOP_MNEMONIC("outs DX,Yv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_64BIT:
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
}
}
}
/** Opcode 0x70. */
{
IEMOP_MNEMONIC("jo Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x71. */
{
IEMOP_MNEMONIC("jno Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x72. */
{
IEMOP_MNEMONIC("jc/jnae Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x73. */
{
IEMOP_MNEMONIC("jnc/jnb Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x74. */
{
IEMOP_MNEMONIC("je/jz Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x75. */
{
IEMOP_MNEMONIC("jne/jnz Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x76. */
{
IEMOP_MNEMONIC("jbe/jna Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x77. */
{
IEMOP_MNEMONIC("jnbe/ja Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x78. */
{
IEMOP_MNEMONIC("js Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x79. */
{
IEMOP_MNEMONIC("jns Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x7a. */
{
IEMOP_MNEMONIC("jp Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x7b. */
{
IEMOP_MNEMONIC("jnp Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x7c. */
{
IEMOP_MNEMONIC("jl/jnge Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x7d. */
{
IEMOP_MNEMONIC("jnl/jge Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x7e. */
{
IEMOP_MNEMONIC("jle/jng Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x7f. */
{
IEMOP_MNEMONIC("jnle/jg Jb");
IEM_MC_BEGIN(0, 0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x80. */
{
IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Eb,Ib");
{
/* register target */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory target */
if (pImpl->pfnLockedU8)
else
{ /* CMP */
}
else
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x81. */
{
IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Iz");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
{
/* register target */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory target */
if (pImpl->pfnLockedU16)
else
{ /* CMP, TEST */
}
else
IEM_MC_END();
}
break;
}
case IEMMODE_32BIT:
{
{
/* register target */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory target */
if (pImpl->pfnLockedU32)
else
{ /* CMP, TEST */
}
else
IEM_MC_END();
}
break;
}
case IEMMODE_64BIT:
{
{
/* register target */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory target */
if (pImpl->pfnLockedU64)
else
{ /* CMP */
}
else
IEM_MC_END();
}
break;
}
}
return VINF_SUCCESS;
}
/** Opcode 0x82. */
{
IEMOP_HLP_NO_64BIT(); /** @todo do we need to decode the whole instruction or is this ok? */
return FNIEMOP_CALL(iemOp_Grp1_Eb_Ib_80);
}
/** Opcode 0x83. */
{
IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Ib");
{
/*
* Register target
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
break;
}
case IEMMODE_32BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
break;
}
case IEMMODE_64BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
break;
}
}
}
else
{
/*
* Memory target.
*/
if (pImpl->pfnLockedU16)
else
{ /* CMP */
}
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
else
IEM_MC_END();
break;
}
case IEMMODE_32BIT:
{
else
IEM_MC_END();
break;
}
case IEMMODE_64BIT:
{
else
IEM_MC_END();
break;
}
}
}
return VINF_SUCCESS;
}
/** Opcode 0x84. */
{
IEMOP_MNEMONIC("test Eb,Gb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
}
/** Opcode 0x85. */
{
IEMOP_MNEMONIC("test Ev,Gv");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
}
/** Opcode 0x86. */
/** Opcode 0x87. */
/** Opcode 0x88. */
{
IEMOP_MNEMONIC("mov Eb,Gb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
IEM_MC_BEGIN(0, 1);
IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
}
else
{
/*
* We're writing a register to memory.
*/
IEM_MC_BEGIN(0, 2);
IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x89. */
{
IEMOP_MNEMONIC("mov Ev,Gv");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
}
}
else
{
/*
* We're writing a register to memory.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
IEM_MC_END();
break;
}
}
return VINF_SUCCESS;
}
/** Opcode 0x8a. */
{
IEMOP_MNEMONIC("mov Gb,Eb");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
IEM_MC_END();
}
else
{
/*
* We're loading a register from memory.
*/
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x8b. */
{
IEMOP_MNEMONIC("mov Gv,Ev");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
break;
}
}
else
{
/*
* We're loading a register from memory.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
IEM_MC_END();
break;
}
}
return VINF_SUCCESS;
}
/** Opcode 0x8c. */
{
IEMOP_MNEMONIC("mov Ev,Sw");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* Check that the destination register exists. The REX.R prefix is ignored.
*/
if ( iSegReg > X86_SREG_GS)
return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
* In that case, the operand size is respected and the upper bits are
* cleared (starting with some pentium).
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
}
else
{
/*
* We're saving the register to memory. The access is word sized
* regardless of operand size prefixes.
*/
#if 0 /* not necessary */
#endif
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x8d. */
{
IEMOP_MNEMONIC("lea Gv,M");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
return IEMOP_RAISE_INVALID_LOCK_PREFIX(); /* no register form */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0x8e. */
{
IEMOP_MNEMONIC("mov Sw,Ev");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* The practical operand size is 16-bit.
*/
#if 0 /* not necessary */
#endif
/*
* Check that the destination register exists and can be used with this
* instruction. The REX.R prefix is ignored.
*/
if ( iSegReg == X86_SREG_CS
|| iSegReg > X86_SREG_GS)
return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
/*
* If rm is denoting a register, no more instruction bytes.
*/
{
IEM_MC_BEGIN(2, 0);
IEM_MC_END();
}
else
{
/*
* We're loading the register from memory. The access is word sized
* regardless of operand size prefixes.
*/
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0x8f. */
/**
* Common 'xchg reg,rAX' helper.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0x90. */
{
{
IEMOP_MNEMONIC("xchg r8,rAX");
}
IEMOP_MNEMONIC("pause");
else
IEMOP_MNEMONIC("nop");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0x91. */
{
IEMOP_MNEMONIC("xchg rCX,rAX");
}
/** Opcode 0x92. */
{
IEMOP_MNEMONIC("xchg rDX,rAX");
}
/** Opcode 0x93. */
{
IEMOP_MNEMONIC("xchg rBX,rAX");
}
/** Opcode 0x94. */
{
IEMOP_MNEMONIC("xchg rSX,rAX");
}
/** Opcode 0x95. */
{
IEMOP_MNEMONIC("xchg rBP,rAX");
}
/** Opcode 0x96. */
{
IEMOP_MNEMONIC("xchg rSI,rAX");
}
/** Opcode 0x97. */
{
IEMOP_MNEMONIC("xchg rDI,rAX");
}
/** Opcode 0x98. */
/** Opcode 0x99. */
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEMOP_MNEMONIC("cwd");
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEMOP_MNEMONIC("cwq");
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEMOP_MNEMONIC("cqo");
IEM_MC_BEGIN(0, 1);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0x9a. */
/** Opcode 0x9b. */
/** Opcode 0x9c. */
{
}
/** Opcode 0x9d. */
{
}
/** Opcode 0x9e. */
/** Opcode 0x9f. */
/**
* Macro used by iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL and
* iemOp_mov_Ov_rAX to fetch the moffsXX bit of the opcode and fend of lock
* prefixes. Will return on failures.
* @param a_GCPtrMemOff The variable to store the offset in.
*/
#define IEMOP_FETCH_MOFFS_XX(a_GCPtrMemOff) \
do \
{ \
switch (pIemCpu->enmEffAddrMode) \
{ \
case IEMMODE_16BIT: \
{ \
(a_GCPtrMemOff) = u16Off; \
break; \
} \
case IEMMODE_32BIT: \
{ \
(a_GCPtrMemOff) = u32Off; \
break; \
} \
case IEMMODE_64BIT: \
break; \
} \
} while (0)
/** Opcode 0xa0. */
{
/*
* Get the offset and fend of lock prefixes.
*/
/*
* Fetch AL.
*/
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xa1. */
{
/*
* Get the offset and fend of lock prefixes.
*/
/*
* Fetch rAX.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0xa2. */
{
/*
* Get the offset and fend of lock prefixes.
*/
/*
* Store AL.
*/
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xa3. */
{
/*
* Get the offset and fend of lock prefixes.
*/
/*
* Store rAX.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0,1);
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Macro used by iemOp_movsb_Xb_Yb and iemOp_movswd_Xv_Yv */
IEM_MC_BEGIN(0, 2); \
} IEM_MC_ELSE() { \
} IEM_MC_ENDIF(); \
IEM_MC_ADVANCE_RIP(); \
IEM_MC_END(); \
/** Opcode 0xa4. */
{
/*
* Use the C implementation if a repeate prefix is encountered.
*/
{
IEMOP_MNEMONIC("rep movsb Xb,Yb");
switch (pIemCpu->enmEffAddrMode)
{
}
}
IEMOP_MNEMONIC("movsb Xb,Yb");
/*
* Sharing case implementation with movs[wdq] below.
*/
switch (pIemCpu->enmEffAddrMode)
{
}
return VINF_SUCCESS;
}
/** Opcode 0xa5. */
{
/*
* Use the C implementation if a repeate prefix is encountered.
*/
{
IEMOP_MNEMONIC("rep movs Xv,Yv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr16, pIemCpu->iEffSeg);
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr64, pIemCpu->iEffSeg);
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr16, pIemCpu->iEffSeg);
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr64, pIemCpu->iEffSeg);
}
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr32, pIemCpu->iEffSeg);
case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr64, pIemCpu->iEffSeg);
}
}
}
IEMOP_MNEMONIC("movs Xv,Yv");
/*
* Annoying double switch here.
* Using ugly macro for implementing the cases, sharing it with movsb.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
}
return VINF_SUCCESS;
}
/** Opcode 0xa6. */
/** Opcode 0xa7. */
/** Opcode 0xa8. */
{
IEMOP_MNEMONIC("test al,Ib");
}
/** Opcode 0xa9. */
{
IEMOP_MNEMONIC("test rAX,Iz");
}
/** Macro used by iemOp_stosb_Yb_AL and iemOp_stoswd_Yv_eAX */
IEM_MC_BEGIN(0, 2); \
} IEM_MC_ELSE() { \
} IEM_MC_ENDIF(); \
IEM_MC_ADVANCE_RIP(); \
IEM_MC_END(); \
/** Opcode 0xaa. */
{
/*
* Use the C implementation if a repeate prefix is encountered.
*/
{
IEMOP_MNEMONIC("rep stos Yb,al");
switch (pIemCpu->enmEffAddrMode)
{
}
}
IEMOP_MNEMONIC("stos Yb,al");
/*
* Sharing case implementation with stos[wdq] below.
*/
switch (pIemCpu->enmEffAddrMode)
{
}
return VINF_SUCCESS;
}
/** Opcode 0xab. */
{
/*
* Use the C implementation if a repeate prefix is encountered.
*/
{
IEMOP_MNEMONIC("rep stos Yv,rAX");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
}
}
IEMOP_MNEMONIC("stos Yv,rAX");
/*
* Annoying double switch here.
* Using ugly macro for implementing the cases, sharing it with stosb.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
}
return VINF_SUCCESS;
}
/** Macro used by iemOp_lodsb_AL_Xb and iemOp_lodswd_eAX_Xv */
IEM_MC_BEGIN(0, 2); \
} IEM_MC_ELSE() { \
} IEM_MC_ENDIF(); \
IEM_MC_ADVANCE_RIP(); \
IEM_MC_END();
/** Opcode 0xac. */
{
/*
* Use the C implementation if a repeate prefix is encountered.
*/
{
IEMOP_MNEMONIC("rep lodsb al,Xb");
switch (pIemCpu->enmEffAddrMode)
{
}
}
IEMOP_MNEMONIC("lodsb al,Xb");
/*
* Sharing case implementation with stos[wdq] below.
*/
switch (pIemCpu->enmEffAddrMode)
{
}
return VINF_SUCCESS;
}
/** Opcode 0xad. */
{
/*
* Use the C implementation if a repeate prefix is encountered.
*/
{
IEMOP_MNEMONIC("rep lods rAX,Xv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
}
}
IEMOP_MNEMONIC("lods rAX,Xv");
/*
* Annoying double switch here.
* Using ugly macro for implementing the cases, sharing it with lodsb.
*/
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_32BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
case IEMMODE_64BIT:
switch (pIemCpu->enmEffAddrMode)
{
}
break;
}
return VINF_SUCCESS;
}
/** Opcode 0xae. */
/** Opcode 0xaf. */
/**
* Common 'mov r8, imm8' helper.
*/
{
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xb0. */
{
IEMOP_MNEMONIC("mov AL,Ib");
}
/** Opcode 0xb1. */
{
IEMOP_MNEMONIC("mov CL,Ib");
}
/** Opcode 0xb2. */
{
IEMOP_MNEMONIC("mov DL,Ib");
}
/** Opcode 0xb3. */
{
IEMOP_MNEMONIC("mov BL,Ib");
}
/** Opcode 0xb4. */
{
IEMOP_MNEMONIC("mov AH,Ib");
}
/** Opcode 0xb5. */
{
IEMOP_MNEMONIC("mov CH,Ib");
}
/** Opcode 0xb6. */
{
IEMOP_MNEMONIC("mov DH,Ib");
}
/** Opcode 0xb7. */
{
IEMOP_MNEMONIC("mov BH,Ib");
}
/**
* Common 'mov regX,immX' helper.
*/
{
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
case IEMMODE_32BIT:
{
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
case IEMMODE_64BIT:
{
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
break;
}
}
return VINF_SUCCESS;
}
/** Opcode 0xb8. */
{
IEMOP_MNEMONIC("mov rAX,IV");
}
/** Opcode 0xb9. */
{
IEMOP_MNEMONIC("mov rCX,IV");
}
/** Opcode 0xba. */
{
IEMOP_MNEMONIC("mov rDX,IV");
}
/** Opcode 0xbb. */
{
IEMOP_MNEMONIC("mov rBX,IV");
}
/** Opcode 0xbc. */
{
IEMOP_MNEMONIC("mov rSP,IV");
}
/** Opcode 0xbd. */
{
IEMOP_MNEMONIC("mov rBP,IV");
}
/** Opcode 0xbe. */
{
IEMOP_MNEMONIC("mov rSI,IV");
}
/** Opcode 0xbf. */
{
IEMOP_MNEMONIC("mov rDI,IV");
}
/** Opcode 0xc0. */
{
{
case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
}
{
/* register */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0xc1. */
{
{
case 4:
#ifdef IEM_VERIFICATION_MODE
#endif
break;
case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
}
{
/* register */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/* memory */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0xc2. */
{
IEMOP_MNEMONIC("retn Iw");
}
/** Opcode 0xc3. */
{
IEMOP_MNEMONIC("retn");
}
/** Opcode 0xc4. */
{
IEMOP_MNEMONIC("les Gv,Mp");
}
/** Opcode 0xc5. */
{
IEMOP_MNEMONIC("lds Gv,Mp");
}
/** Opcode 0xc6. */
{
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
return IEMOP_RAISE_INVALID_LOCK_PREFIX();
IEMOP_MNEMONIC("mov Eb,Ib");
{
/* register access */
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
}
else
{
/* memory access. */
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0xc7. */
{
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
return IEMOP_RAISE_INVALID_LOCK_PREFIX();
IEMOP_MNEMONIC("mov Ev,Iz");
{
/* register access */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/* memory access. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0xc8. */
/** Opcode 0xc9. */
/** Opcode 0xca. */
{
IEMOP_MNEMONIC("retf Iw");
}
/** Opcode 0xcb. */
{
IEMOP_MNEMONIC("retf");
}
/** Opcode 0xcc. */
{
}
/** Opcode 0xcd. */
{
}
/** Opcode 0xce. */
{
IEM_MC_BEGIN(2, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xcf. */
{
IEMOP_MNEMONIC("iret");
}
/** Opcode 0xd0. */
/* NOTE(review): opcodes 0xd0-0xd3 are the Grp2 shift/rotate family
   (Eb,1 / Ev,1 / Eb,CL / Ev,CL).  In this excerpt the FNIEMOP_DEF()
   signatures, the switch on the ModRM reg field selecting rol/ror/rcl/
   rcr/shl/shr/sar, and the IEM_MC worker statements were stripped; only
   the scope skeletons remain.  Do not treat the braces as compilable. */
{
{
/* reg=6 is the undefined Grp2 encoding; raising an invalid-lock-prefix
   error here looks like a placeholder -- TODO confirm in full source. */
case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
}
{
/* register */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0xd1. */
{
{
/* case 4 (shl) gets special treatment only in verification builds --
   presumably an accounting hack; see IEM_VERIFICATION_MODE below. */
case 4:
#ifdef IEM_VERIFICATION_MODE
#endif
break;
case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
}
{
/* register */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/* memory */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0xd2. */
/* Eb,CL variant -- same skeleton as 0xd0 with the shift count taken from
   CL in the full source (count fetch not visible here). */
{
{
case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc, grr. */
}
{
/* register */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0xd3. */
/* Ev,CL variant -- same skeleton as 0xd1. */
{
{
case 4:
#ifdef IEM_VERIFICATION_MODE
#endif
break;
case 6: return IEMOP_RAISE_INVALID_LOCK_PREFIX();
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
}
{
/* register */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
else
{
/* memory */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0xd4. */
/** Opcode 0xd5. */
/* NOTE(review): 0xd6 (salc) has no marker here -- possibly absent from the
   full file too; verify.  0xd8-0xdf are the x87 escape opcodes. */
/** Opcode 0xd7. */
/** Opcode 0xd8. */
/** Opcode 0xd9. */
/** Opcode 0xda. */
/** Opcode 0xdb. */
/** Opcode 0xdc. */
/** Opcode 0xdd. */
/** Opcode 0xde. */
/** Opcode 0xdf. */
/** Opcode 0xe0. */
/* NOTE(review): in the loop/jecxz handlers below the IEM_MC_IF_* opening
   statements (and the FNIEMOP_DEF signatures) were stripped, which is why
   each case shows an unmatched "} IEM_MC_ELSE() {".  The three cases
   select the counter register width (CX/ECX/RCX) by effective address
   size. */
{
IEMOP_MNEMONIC("loopne Jb");
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0xe1. */
{
IEMOP_MNEMONIC("loope Jb");
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0xe2. */
{
IEMOP_MNEMONIC("loop Jb");
/** @todo Check out the #GP case if EIP < CS.Base or EIP > CS.Limit when
 * using the 32-bit operand size override.  How can that be restarted?  See
 * weird pseudo code in intel manual. */
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0xe3. */
{
IEMOP_MNEMONIC("jecxz Jb");
switch (pIemCpu->enmEffAddrMode)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0,0);
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0xe4 */
/* Opcode 0xE4 is the byte form IN AL,imm8; the mnemonic previously read
   "in eAX,Ib", duplicating the 0xE5 word/dword form -- fixed to "in AL,Ib".
   (Handler signature/body not visible in this excerpt.) */
{
IEMOP_MNEMONIC("in AL,Ib");
}
/** Opcode 0xe5 */
{
IEMOP_MNEMONIC("in eAX,Ib");
}
/** Opcode 0xe6 */
{
IEMOP_MNEMONIC("out Ib,AL");
}
/** Opcode 0xe7 */
/* NOTE(review): u8Imm is referenced below but its fetch (and the handler
   signature) is not visible in this excerpt -- presumably stripped.
   Operand width 2 or 4 is chosen from the effective operand size; the
   64-bit case also uses 4 since OUT has no 64-bit operand. */
{
IEMOP_MNEMONIC("out Ib,eAX");
return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
/** Opcode 0xe8. */
/* NOTE(review): the case bodies (displacement fetch, push of the return
   address, RIP update) and the default statement were stripped by
   extraction; only the dispatch skeleton remains. */
{
IEMOP_MNEMONIC("call Jv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
}
case IEMMODE_32BIT:
{
}
case IEMMODE_64BIT:
{
}
default:
}
}
/** Opcode 0xe9. */
/* 64-bit and 32-bit share a case: near jmp rel32 behaves the same apart
   from the final RIP width handling (not visible here). */
{
IEMOP_MNEMONIC("jmp Jv");
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
case IEMMODE_32BIT:
{
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
/** Opcode 0xea. */
/* Direct far jump -- the pointer decode and the C-worker call were
   stripped; only the branch skeleton remains. */
{
IEMOP_MNEMONIC("jmp Ap");
/* Decode the far pointer address and pass it on to the far call C implementation. */
else
{
}
}
/** Opcode 0xeb. */
{
IEMOP_MNEMONIC("jmp Jb");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xec */
{
IEMOP_MNEMONIC("in AL,DX");
}
/** Opcode 0xed */
{
IEMOP_MNEMONIC("in eAX,DX");
}
/** Opcode 0xee */
{
IEMOP_MNEMONIC("out DX,AL");
}
/** Opcode 0xef */
{
IEMOP_MNEMONIC("out DX,eAX");
return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
/** Opcode 0xf0. */
/* NOTE(review): the prefix handlers below record the prefix and re-dispatch
   on the next opcode byte 'b'; the prefix-flag updates and the fetch of 'b'
   are not visible in this excerpt.  0xf1 (int1/icebp) has no marker. */
{
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0xf2. */
{
/* This overrides any previous REPE prefix. */
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0xf3. */
{
/* This overrides any previous REPNE prefix. */
return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
/** Opcode 0xf4. */
/** Opcode 0xf5. */
/**
 * Common worker fragment -- presumably the register/memory dispatch for a
 * Grp3-style byte operation; the signature and most worker statements were
 * stripped by extraction.  TODO confirm against the full source.
 *
 * @param bRm The RM byte.
 * @param pImpl The instruction implementation.
 */
{
{
/* register access */
IEM_MC_BEGIN(2, 0);
IEM_MC_END();
}
else
{
/* memory access. */
else
IEM_MC_END();
}
return VINF_SUCCESS;
}
/**
 * Common worker fragment -- the Ev (16/32/64-bit) counterpart of the byte
 * worker above; registers go to a shared helper, memory is dispatched here
 * by effective operand size.  Signature not visible in this excerpt.
 *
 * @param bRm The RM byte.
 * @param pImpl The instruction implementation.
 */
{
/* Registers are handled by a common worker. */
/* Memory we do here. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
else
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
else
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
else
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0xf6 /0. */
/* NOTE(review): signature, immediate fetch and TEST worker invocations were
   stripped; only the register/memory dispatch skeleton remains. */
{
IEMOP_MNEMONIC("test Eb,Ib");
{
/* register access */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory access. */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0xf7 /0. */
/* Ev,Iv form -- dispatch by effective operand size; worker statements not
   visible in this excerpt. */
{
IEMOP_MNEMONIC("test Ev,Iv");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
{
/* register access */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_32BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
{
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
else
{
/* memory access. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_32BIT:
{
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
{
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
}
/** Opcode 0xf6 /4, /5, /6 and /7. */
/* NOTE(review): common worker for the byte mul/imul/div/idiv encodings;
   the signature and the AX/AL argument plumbing were stripped by
   extraction.  fMulDivHack presumably relaxes result verification for
   these instructions in verification builds -- TODO confirm. */
{
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
#ifdef IEM_VERIFICATION_MODE
pIemCpu->fMulDivHack = true;
#endif
{
/* register access */
IEM_MC_BEGIN(3, 0);
IEM_MC_END();
}
else
{
/* memory access. */
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
IEM_MC_END();
}
return VINF_SUCCESS;
}
/** Opcode 0xf7 /4, /5, /6 and /7. */
/* Ev counterpart of the worker above.  The IEM_MC_IF_* openings matching
   the "} IEM_MC_ELSE() {" lines below (presumably #DE checks on the worker
   return) were stripped, hence the unbalanced braces in this excerpt. */
{
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
#ifdef IEM_VERIFICATION_MODE
pIemCpu->fMulDivHack = true;
#endif
{
/* register access */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_32BIT:
{
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
{
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
else
{
/* memory access. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
{
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_32BIT:
{
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
case IEMMODE_64BIT:
{
} IEM_MC_ELSE() {
} IEM_MC_ENDIF();
IEM_MC_END();
return VINF_SUCCESS;
}
}
}
}
/** Opcode 0xf6. */
/* NOTE(review): Grp3 dispatcher on the ModRM reg field.  The
   FNIEMOP_CALL/return statements after each mnemonic were stripped by
   extraction, making the cases look like fall-throughs; in the full file
   each case dispatches to its worker.  reg=1 is the undefined Grp3
   encoding. */
{
{
case 0:
case 1:
return IEMOP_RAISE_INVALID_LOCK_PREFIX();
case 2:
IEMOP_MNEMONIC("not Eb");
case 3:
IEMOP_MNEMONIC("neg Eb");
case 4:
IEMOP_MNEMONIC("mul Eb");
case 5:
IEMOP_MNEMONIC("imul Eb");
case 6:
IEMOP_MNEMONIC("div Eb");
case 7:
IEMOP_MNEMONIC("idiv Eb");
}
}
/** Opcode 0xf7. */
/* Ev counterpart of the Grp3 dispatcher above -- same stripped structure. */
{
{
case 0:
case 1:
return IEMOP_RAISE_INVALID_LOCK_PREFIX();
case 2:
IEMOP_MNEMONIC("not Ev");
case 3:
IEMOP_MNEMONIC("neg Ev");
case 4:
IEMOP_MNEMONIC("mul Ev");
case 5:
IEMOP_MNEMONIC("imul Ev");
case 6:
IEMOP_MNEMONIC("div Ev");
case 7:
IEMOP_MNEMONIC("idiv Ev");
}
}
/** Opcode 0xf8. */
/* CLC -- the EFLAGS.CF clearing statement was stripped; only the MC
   skeleton remains in this excerpt. */
{
IEMOP_MNEMONIC("clc");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xf9. */
/* Opcode 0xF9 is STC (set carry flag); the mnemonic previously read "slc",
   which is not an x86 instruction -- fixed to "stc".  The EFLAGS.CF setting
   statement is not visible in this excerpt. */
{
IEMOP_MNEMONIC("stc");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xfa. */
{
IEMOP_MNEMONIC("cli");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
}
/* NOTE(review): the "/** Opcode 0xfb. */" marker for the STI handler below
   is missing in this excerpt -- presumably lost in extraction. */
{
IEMOP_MNEMONIC("sti");
return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sti);
}
/** Opcode 0xfc. */
/* CLD -- the EFLAGS.DF clearing statement was stripped from the skeleton. */
{
IEMOP_MNEMONIC("cld");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xfd. */
/* STD -- the EFLAGS.DF setting statement was stripped from the skeleton. */
{
IEMOP_MNEMONIC("std");
IEM_MC_BEGIN(0, 0);
IEM_MC_END();
return VINF_SUCCESS;
}
/** Opcode 0xfe. */
/* Grp4 dispatcher (0xFE) -- byte-sized inc/dec.  The mnemonics previously
   read "inc Ev"/"dec Ev", but 0xFE operates on Eb (the Ev forms live in
   Grp5 at 0xFF) -- fixed to "inc Eb"/"dec Eb".  The dispatch calls after
   each mnemonic are not visible in this excerpt. */
{
{
case 0:
IEMOP_MNEMONIC("inc Eb");
case 1:
IEMOP_MNEMONIC("dec Eb");
default:
IEMOP_MNEMONIC("grp4-ud");
return IEMOP_RAISE_INVALID_OPCODE();
}
}
/**
 * Opcode 0xff /2.
 * Near indirect call (call Ev) -- currently a stub that asserts and
 * returns not-implemented.
 * @param bRm The RM byte.
 */
{
AssertFailed(); // FNIEMOP_STUB
return VERR_NOT_IMPLEMENTED;
}
/**
 * Opcode 0xff /3.
 * Far indirect call through a memory far pointer; the register form is an
 * open question (see todo below).  Load statements were stripped from the
 * per-operand-size cases in this excerpt.
 * @param bRm The RM byte.
 */
{
IEMOP_MNEMONIC("callf Ep");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
/* Registers? How?? */
{
/** @todo How the heck does a 'callf eax' work? Probably just have to
 * search the docs... */
}
/* Far pointer loaded from memory. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
{
IEM_MC_END();
}
else
{
IEM_MC_END();
}
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_END();
return VINF_SUCCESS;
}
}
/**
 * Opcode 0xff /4.
 *
 * Near indirect jump (jmp Ev): the new RIP is read from a register or from
 * memory.  The mnemonic previously read "callf Ep" -- a copy/paste of the
 * /3 (far call) handler above; 0xFF /4 is the near jmp, so it now reads
 * "jmp Ev".  The memory-branch comment was likewise corrected (it said
 * "from a register").  The RIP fetch/set statements after each
 * IEM_MC_BEGIN were stripped in this excerpt.
 *
 * @param bRm The RM byte.
 */
{
IEMOP_MNEMONIC("jmp Ev");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
{
/* The new RIP is taken from a register. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 1);
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 1);
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 1);
return VINF_SUCCESS;
}
}
else
{
/* The new RIP is taken from memory. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 2);
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 2);
return VINF_SUCCESS;
}
}
}
/**
 * Opcode 0xff /5.
 * Far indirect jump (jmp Mp) -- currently a stub that asserts and returns
 * not-implemented.
 * @param bRm The RM byte.
 */
{
/* decode and use a C worker. */
AssertFailed(); // FNIEMOP_STUB
return VERR_NOT_IMPLEMENTED;
}
/**
 * Opcode 0xff /6.
 * push Ev -- registers go to a common worker; memory operands are handled
 * here per operand size.  The fetch/push statements between IEM_MC_BEGIN
 * and IEM_MC_END were stripped in this excerpt.
 * @param bRm The RM byte.
 */
{
IEMOP_MNEMONIC("push Ev");
IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
/* Registers are handled by a common worker. */
/* Memory we do here. */
switch (pIemCpu->enmEffOpSize)
{
case IEMMODE_16BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_32BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
return VINF_SUCCESS;
case IEMMODE_64BIT:
IEM_MC_BEGIN(0, 2);
IEM_MC_END();
return VINF_SUCCESS;
}
}
/** Opcode 0xff. */
/* NOTE(review): Grp5 dispatcher.  The dispatch calls for cases 0-6 were
   stripped by extraction; in the full file cases 2/3/4/5/6 route to the
   /2../6 handlers above and only reg=7 is the undefined encoding. */
{
{
case 0:
IEMOP_MNEMONIC("inc Ev");
case 1:
IEMOP_MNEMONIC("dec Ev");
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
IEMOP_MNEMONIC("grp5-ud");
return IEMOP_RAISE_INVALID_OPCODE();
}
}
{
};
/** @} */