/* IEMAllCImpl.cpp.h revision 8b148a9b18f1df43970276614ab624e85d42f156 */
/* $Id$ */
/** @file
* IEM - Instruction Implementation in C/C++ (code include).
*/
/*
* Copyright (C) 2011 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/** @name Misc Helpers
* @{
*/
/**
* Checks if we are allowed to access the given I/O port, raising the
* appropriate exceptions if we aren't (or if the I/O bitmap is not
* accessible).
*
* @returns Strict VBox status code.
*
* @param pIemCpu The IEM per CPU data.
* @param pCtx The register context.
* @param u16Port The port number.
* @param cbOperand The operand size.
*/
DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
{
/* NOTE(review): the actual I/O-permission-bitmap / IOPL checks described in the
   function header are not visible here — only an empty scope remains and the
   function unconditionally reports success. Confirm against the full source. */
{
}
/* No failure detected (or no check performed) — allow the I/O access. */
return VINF_SUCCESS;
}
#if 0
/**
* Calculates the parity bit.
*
* @returns true if the bit is set, false if not.
* @param u8Result The least significant byte of the result.
*/
{
/*
* Parity is set if the number of bits in the least significant byte of
* the result is even.
*/
u8Result >>= 1;
u8Result >>= 1;
u8Result >>= 1;
u8Result >>= 1;
u8Result >>= 1;
u8Result >>= 1;
u8Result >>= 1;
return !(cBits & 1);
}
#endif /* not used */
/**
* Updates the specified flags according to a 8-bit result.
*
* @param pIemCpu The IEM per CPU data.
* @param u8Result The result to set the flags according to.
* @param fToUpdate The flags to update.
* @param fUndefined The flags that are specified as undefined.
*/
static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
{
/* NOTE(review): body is empty in this view — the flag-calculation statements
   (SF/ZF/PF updates per fToUpdate, fUndefined handling) appear to be missing.
   Verify against the complete source before relying on this helper. */
}
/** @} */
/** @name C Implementations
* @{
*/
/**
* Implements a 16-bit popa.
*/
{
/*
* The docs are a bit hard to comprehend here, but it looks like we wrap
* around in real mode as long as none of the individual "popa" crosses the
* end of the stack segment. In protected mode we check the whole access
* in one go. For efficiency, only do the word-by-word thing if we're in
* danger of wrapping around.
*/
/** @todo do popa boundary / wrap-around checks. */
{
/* word-by-word */
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
{
}
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
{
}
}
else
{
if (rcStrict == VINF_SUCCESS)
{
/* skip sp */
if (rcStrict == VINF_SUCCESS)
{
}
}
}
return rcStrict;
}
/**
* Implements a 32-bit popa.
*/
{
/*
* The docs are a bit hard to comprehend here, but it looks like we wrap
* around in real mode as long as none of the individual "popa" crosses the
* end of the stack segment. In protected mode we check the whole access
* in one go. For efficiency, only do the word-by-word thing if we're in
* danger of wrapping around.
*/
/** @todo do popa boundary / wrap-around checks. */
{
/* word-by-word */
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
{
}
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
{
#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
#endif
}
}
else
{
if (rcStrict == VINF_SUCCESS)
{
/* skip esp */
if (rcStrict == VINF_SUCCESS)
{
}
}
}
return rcStrict;
}
/**
* Implements a 16-bit pusha.
*/
{
/*
* The docs are a bit hard to comprehend here, but it looks like we wrap
* around in real mode as long as none of the individual "pusha" crosses the
* end of the stack segment. In protected mode we check the whole access
* in one go. For efficiency, only do the word-by-word thing if we're in
* danger of wrapping around.
*/
/** @todo do pusha boundary / wrap-around checks. */
&& IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
{
/* word-by-word */
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
{
}
}
else
{
GCPtrBottom--;
if (rcStrict == VINF_SUCCESS)
{
if (rcStrict == VINF_SUCCESS)
{
}
}
}
return rcStrict;
}
/**
* Implements a 32-bit pusha.
*/
{
/*
* The docs are a bit hard to comprehend here, but it looks like we wrap
* around in real mode as long as none of the individual "pusha" crosses the
* end of the stack segment. In protected mode we check the whole access
* in one go. For efficiency, only do the word-by-word thing if we're in
* danger of wrapping around.
*/
/** @todo do pusha boundary / wrap-around checks. */
&& IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
{
/* word-by-word */
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
{
}
}
else
{
GCPtrBottom--;
if (rcStrict == VINF_SUCCESS)
{
if (rcStrict == VINF_SUCCESS)
{
}
}
}
return rcStrict;
}
/**
* Implements pushf.
*
*
* @param enmEffOpSize The effective operand size.
*/
{
/*
* If we're in V8086 mode some care is required (which is why we're in
* doing this in a C implementation).
*/
if ( (fEfl & X86_EFL_VM)
{
if ( enmEffOpSize != IEMMODE_16BIT
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Ok, clear RF and VM and push the flags.
*/
switch (enmEffOpSize)
{
case IEMMODE_16BIT:
break;
case IEMMODE_32BIT:
break;
case IEMMODE_64BIT:
break;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
/**
* Implements popf.
*
* @param enmEffOpSize The effective operand size.
*/
{
/*
* V8086 is special as usual.
*/
if (fEflOld & X86_EFL_VM)
{
/*
* Almost anything goes if IOPL is 3.
*/
{
switch (enmEffOpSize)
{
case IEMMODE_16BIT:
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
break;
}
case IEMMODE_32BIT:
if (rcStrict != VINF_SUCCESS)
return rcStrict;
break;
}
}
/*
* Interrupt flag virtualization with CR4.VME=1.
*/
else if ( enmEffOpSize == IEMMODE_16BIT
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
* or before? */
if ( ( (u16Value & X86_EFL_IF)
&& (fEflOld & X86_EFL_VIP))
|| (u16Value & X86_EFL_TF) )
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
else
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Not in V8086 mode.
*/
else
{
/* Pop the flags. */
switch (enmEffOpSize)
{
case IEMMODE_16BIT:
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
break;
}
case IEMMODE_32BIT:
case IEMMODE_64BIT:
if (rcStrict != VINF_SUCCESS)
return rcStrict;
break;
}
/* Merge them with the current flags. */
{
}
{
}
else
{
}
}
/*
* Commit the flags.
*/
return VINF_SUCCESS;
}
/**
* Implements an indirect call.
*
* @param uNewPC The new program counter (RIP) value (loaded from the
* operand).
* @param enmEffOpSize The effective operand size.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
/**
* Implements a 16-bit relative call.
*
* @param offDisp The displacement offset.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
/**
* Implements a 32-bit indirect call.
*
* @param uNewPC The new program counter (RIP) value (loaded from the
* operand).
* @param enmEffOpSize The effective operand size.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
/**
* Implements a 32-bit relative call.
*
* @param offDisp The displacement offset.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
/**
* Implements a 64-bit indirect call.
*
* @param uNewPC The new program counter (RIP) value (loaded from the
* operand).
* @param enmEffOpSize The effective operand size.
*/
{
if (!IEM_IS_CANONICAL(uNewPC))
return iemRaiseGeneralProtectionFault0(pIemCpu);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
/**
* Implements a 64-bit relative call.
*
* @param offDisp The displacement offset.
*/
{
if (!IEM_IS_CANONICAL(uNewPC))
return iemRaiseNotCanonical(pIemCpu);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
/**
* Implements far jumps.
*
* @param uSel The selector.
* @param offSeg The segment offset.
* @param enmEffOpSize The effective operand size.
*/
{
/*
* Real mode and V8086 mode are easy. The only snag seems to be that
* CS.limit doesn't change and the limit check is done against the current
* limit.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
else
/** @todo REM reset the accessed bit (see on jmp far16 after disabling
* PE. Check with VT-x and AMD-V. */
#ifdef IEM_VERIFICATION_MODE
#endif
return VINF_SUCCESS;
}
/*
* Protected mode. Need to parse the specified descriptor...
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* Fetch the descriptor. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Is it there? */
{
}
/*
* Deal with it according to its type.
*/
{
/* Only code segments. */
{
Log(("jmpf %04x:%08x -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
}
/* L vs D. */
&& IEM_IS_LONG_MODE(pIemCpu))
{
}
{
{
Log(("jmpf %04x:%08x -> DPL violation (conforming); DPL=%d CPL=%u\n",
}
}
else
{
{
Log(("jmpf %04x:%08x -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
}
{
Log(("jmpf %04x:%08x -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
}
}
/* Limit check. (Should alternatively check for non-canonical addresses
here, but that is ruled out by offSeg being 32-bit, right?) */
u64Base = 0;
else
{
{
}
}
/*
* Ok, everything checked out fine. Now set the accessed bit before
* committing the result into CS, CSHID and RIP.
*/
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
#endif
}
/* commit */
/** @todo check if the hidden bits are loaded correctly for 64-bit
* mode. */
return VINF_SUCCESS;
}
/*
* System selector.
*/
if (IEM_IS_LONG_MODE(pIemCpu))
{
case AMD64_SEL_TYPE_SYS_LDT:
/* Call various functions to do the work. */
default:
}
{
case X86_SEL_TYPE_SYS_LDT:
/* Call various functions to do the work. */
/* Call various functions to do the work. */
default:
}
}
/**
* Implements far calls.
*
* @param uSel The selector.
* @param offSeg The segment offset.
* @param enmOpSize The operand size (in case we need it).
*/
{
void *pvRet;
/*
* Real mode and V8086 mode are easy. The only snag seems to be that
* CS.limit doesn't change and the limit check is done against the current
* limit.
*/
{
/* Check stack first - may #SS(0). */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Check the target address range. */
if (offSeg > UINT32_MAX)
return iemRaiseGeneralProtectionFault0(pIemCpu);
/* Everything is fine, push the return address. */
if (enmOpSize == IEMMODE_16BIT)
{
}
else
{
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Branch. */
/** @todo Does REM reset the accessed bit here too? (See on jmp far16
* after disabling PE.) Check with VT-x and AMD-V. */
#ifdef IEM_VERIFICATION_MODE
#endif
return VINF_SUCCESS;
}
}
/**
* Implements retf.
*
* @param enmEffOpSize The effective operand size.
* @param cbPop The amount of arguments to pop from the stack
* (bytes).
*/
{
/*
* Real mode and V8086 mode are easy.
*/
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (enmEffOpSize == IEMMODE_32BIT)
{
}
else
{
}
/** @todo check how this is supposed to work if sp=0xfffe. */
/* Check the limit of the new EIP. */
/** @todo Intel pseudo code only does the limit check for 16-bit
* operands, AMD does not make any distinction. What is right? */
/* commit the operation. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/** @todo do we load attribs and limit as well? */
if (cbPop)
return VINF_SUCCESS;
}
AssertFailed();
return VERR_NOT_IMPLEMENTED;
}
/**
* Implements retn.
*
* We're doing this in C because of the \#GP that might be raised if the popped
* program counter is out of bounds.
*
* @param enmEffOpSize The effective operand size.
* @param cbPop The amount of arguments to pop from the stack
* (bytes).
*/
{
/* Fetch the RSP from the stack. */
switch (enmEffOpSize)
{
case IEMMODE_16BIT:
NewRip.u = 0;
break;
case IEMMODE_32BIT:
NewRip.u = 0;
break;
case IEMMODE_64BIT:
break;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Check the new RSP before loading it. */
/** @todo Should test this as the intel+amd pseudo code doesn't mention half
* of it. The canonical test is performed here and for call. */
if (enmEffOpSize != IEMMODE_64BIT)
{
{
}
}
else
{
if (!IEM_IS_CANONICAL(NewRip.u))
{
return iemRaiseNotCanonical(pIemCpu);
}
}
/* Commit it. */
if (cbPop)
return VINF_SUCCESS;
}
/**
* Implements leave.
*
* We're doing this in C because messing with the stack registers is annoying
* since they depend on SS attributes.
*
* @param enmEffOpSize The effective operand size.
*/
{
/* Calculate the intermediate RSP from RBP and the stack attributes. */
{
/** @todo Check that LEAVE actually preserve the high EBP bits. */
}
else
/* Pop RBP according to the operand size. */
switch (enmEffOpSize)
{
case IEMMODE_16BIT:
break;
case IEMMODE_32BIT:
NewRbp.u = 0;
break;
case IEMMODE_64BIT:
break;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Commit it. */
return VINF_SUCCESS;
}
/**
* Implements int3 and int XX.
*
* @param u8Int The interrupt vector number.
* @param fIsBpInstr Is it the breakpoint instruction.
*/
{
return iemRaiseXcptOrInt(pIemCpu,
0,
0);
}
/**
* Implements iret for real mode and V8086 mode.
*
* @param enmEffOpSize The effective operand size.
*/
{
/*
* iret throws an exception if VME isn't enabled.
*/
return iemRaiseGeneralProtectionFault0(pIemCpu);
/*
* Do the stack bits, but don't commit RSP before everything checks
* out right.
*/
if (enmEffOpSize == IEMMODE_32BIT)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
| X86_EFL_ID;
}
else
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/** @todo The intel pseudo code does not indicate what happens to
* reserved flags. We just ignore them. */
}
/** @todo Check how this is supposed to work if sp=0xfffe. */
/*
* Check the limit of the new EIP.
*/
/** @todo Only the AMD pseudo code check the limit here, what's
* right? */
/*
* V8086 checks and flag adjustments
*/
{
{
/* Preserve IOPL and clear RF. */
}
else if ( enmEffOpSize == IEMMODE_16BIT
&& ( !(uNewFlags & X86_EFL_IF)
&& !(uNewFlags & X86_EFL_TF) )
{
/* Move IF to VIF, clear RF and preserve IF and IOPL.*/
uNewFlags &= ~X86_EFL_VIF;
}
else
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Commit the operation.
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/** @todo do we load attribs and limit as well? */
return VINF_SUCCESS;
}
/**
* Implements iret for protected mode
*
* @param enmEffOpSize The effective operand size.
*/
{
/*
* Nested task return.
*/
{
}
/*
* Normal return.
*/
else
{
/*
* Do the stack bits, but don't commit RSP before everything checks
* out right.
*/
if (enmEffOpSize == IEMMODE_32BIT)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
else
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* What are we returning to?
*/
if ( (uNewFlags & X86_EFL_VM)
{
/* V8086 mode! */
}
else
{
/*
* Protected mode.
*/
/* Read the CS descriptor. */
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Must be a code descriptor. */
{
Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCS, uNewEip, DescCS.Legacy.Gen.u4Type));
}
{
Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCS, uNewEip, DescCS.Legacy.Gen.u4Type));
}
/* Privilege checks. */
{
}
{
}
/* Present? */
{
}
/*
* Different level?
*/
{
}
/*
* Same level.
*/
else
{
/* Check EIP. */
{
}
/*
* Commit the changes, marking CS first since it may fail.
*/
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
if (enmEffOpSize != IEMMODE_16BIT)
/* Done! */
}
}
}
return VINF_SUCCESS;
}
/**
* Implements iret for long mode
*
* @param enmEffOpSize The effective operand size.
*/
{
//PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
//VBOXSTRICTRC rcStrict;
//uint64_t uNewRsp;
return VERR_NOT_IMPLEMENTED;
}
/**
* Implements iret.
*
* @param enmEffOpSize The effective operand size.
*/
{
/*
* Call a mode specific worker.
*/
if (IEM_IS_LONG_MODE(pIemCpu))
}
/**
* Common worker for loading a segment register — presumably iemCImpl_LoadSReg,
* shared by 'mov SReg, r/m', 'pop SReg' and the lXs instructions (see callers
* below); confirm against the full source.
* @param iSegReg The segment register number (valid).
* @param uSel The new selector value.
*/
{
/*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
/*
* Real mode and V8086 mode are easy.
*/
{
/** @todo Does the CPU actually load limits and attributes in the
* jumps... Affects unreal mode. */
return VINF_SUCCESS;
}
/*
* Protected mode.
*
* Check if it's a null segment selector value first, that's OK for DS, ES,
* FS and GS. If not null, then we have to load and parse the descriptor.
*/
{
if (iSegReg == X86_SREG_SS)
{
|| uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* In 64-bit kernel mode, the stack can be 0 because of the way
interrupts are dispatched when in kernel ctx. Just load the
selector value into the register and leave the hidden bits
as is. */
return VINF_SUCCESS;
}
&& iSegReg != X86_SREG_FS
&& iSegReg != X86_SREG_GS)
{
/** @todo figure out what this actually does, it works. Needs
* testcase! */
}
else
{
}
return VINF_SUCCESS;
}
/* Fetch the descriptor. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Check GPs first. */
{
}
{
{
}
{
}
{
}
{
Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
}
}
else
{
{
}
!= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
{
#if 0 /* this is what intel says. */
{
Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
}
#else /* this is what makes more sense. */
{
Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
}
{
Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
}
#endif
}
}
/* Is it there? */
{
}
/* The base and limit. */
&& iSegReg < X86_SREG_FS)
u64Base = 0;
else
/*
* Ok, everything checked out fine. Now set the accessed bit before
* committing the result into the registers.
*/
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
/* commit */
pHid->Attr.u = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff); /** @todo do we have a define for 0xf0ff? */
/** @todo check if the hidden bits are loaded correctly for 64-bit
* mode. */
return VINF_SUCCESS;
}
/**
* Implements 'mov SReg, r/m'.
*
* @param iSegReg The segment register number (valid).
* @param uSel The new selector value.
*/
{
if (rcStrict == VINF_SUCCESS)
{
if (iSegReg == X86_SREG_SS)
{
}
}
return rcStrict;
}
/**
* Implements 'pop SReg'.
*
* @param iSegReg The segment register number (valid).
* @param enmEffOpSize The efficient operand size (valid).
*/
{
/*
* Read the selector off the stack and join paths with mov ss, reg.
*/
switch (enmEffOpSize)
{
case IEMMODE_16BIT:
{
if (rcStrict == VINF_SUCCESS)
break;
}
case IEMMODE_32BIT:
{
if (rcStrict == VINF_SUCCESS)
break;
}
case IEMMODE_64BIT:
{
if (rcStrict == VINF_SUCCESS)
break;
}
}
/*
* Commit the stack on success.
*/
if (rcStrict == VINF_SUCCESS)
{
if (iSegReg == X86_SREG_SS)
}
return rcStrict;
}
/**
* Implements lgs, lfs, les, lds & lss.
*/
{
/*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
/*
* Use iemCImpl_LoadSReg to do the tricky segment register loading.
*/
/** @todo verify and test that mov, pop and lXs works the segment
* register loading in the exact same way. */
if (rcStrict == VINF_SUCCESS)
{
switch (enmEffOpSize)
{
case IEMMODE_16BIT:
break;
case IEMMODE_32BIT:
break;
case IEMMODE_64BIT:
break;
}
}
return rcStrict;
}
/**
* Implements lgdt.
*
* @param iEffSeg The segment of the new gdtr contents.
* @param GCPtrEffSrc The address of the new gdtr contents.
* @param enmEffOpSize The effective operand size.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
/*
* Fetch the limit and base address.
*/
VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
if (rcStrict == VINF_SUCCESS)
{
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
{
}
if (rcStrict == VINF_SUCCESS)
}
return rcStrict;
}
/**
* Implements lidt.
*
* @param iEffSeg The segment of the new idtr contents.
* @param GCPtrEffSrc The address of the new idtr contents.
* @param enmEffOpSize The effective operand size.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
/*
* Fetch the limit and base address.
*/
VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
if (rcStrict == VINF_SUCCESS)
{
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
{
}
if (rcStrict == VINF_SUCCESS)
}
return rcStrict;
}
/**
* Implements lldt.
*
* @param uNewLdt The new LDT selector value.
*/
{
/*
* Check preconditions.
*/
{
return iemRaiseUndefinedOpcode(pIemCpu);
}
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
if (uNewLdt & X86_SEL_LDT)
{
}
/*
* Now, loading a NULL selector is easy.
*/
if ((uNewLdt & X86_SEL_MASK) == 0)
{
/** @todo check if the actual value is loaded or if it's always 0. */
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
return VINF_SUCCESS;
}
/*
* Read the descriptor.
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Check GPs first. */
{
}
{
}
if (!IEM_IS_LONG_MODE(pIemCpu))
else
{
{
}
if (!IEM_IS_CANONICAL(u64Base))
{
}
}
/* NP */
{
}
/*
* It checks out alright, update the registers.
*/
/** @todo check if the actual value is loaded or if the RPL is dropped */
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
return VINF_SUCCESS;
}
/**
* Implements ltr.
*
* @param uNewTr The new task register (TSS) selector value.
*/
{
/*
* Check preconditions.
*/
{
return iemRaiseUndefinedOpcode(pIemCpu);
}
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
if (uNewTr & X86_SEL_LDT)
{
}
if ((uNewTr & X86_SEL_MASK) == 0)
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Read the descriptor.
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Check GPs first. */
{
}
if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
|| IEM_IS_LONG_MODE(pIemCpu)) )
{
Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
}
if (!IEM_IS_LONG_MODE(pIemCpu))
else
{
{
}
if (!IEM_IS_CANONICAL(u64Base))
{
}
}
/* NP */
{
}
/*
* Set it busy.
* Note! Intel says this should lock down the whole descriptor, but we'll
* restrict our selves to 32-bit for now due to lack of inline
* assembly and such.
*/
void *pvDesc;
if (rcStrict != VINF_SUCCESS)
return rcStrict;
{
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* It checks out alright, update the registers.
*/
/** @todo check if the actual value is loaded or if the RPL is dropped */
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
return VINF_SUCCESS;
}
/**
* Implements mov GReg,CRx.
*
* @param iGReg The general register to store the CRx value in.
* @param iCrReg The CRx register to read (valid).
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
/* read it */
switch (iCrReg)
{
case 8:
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
crX = 0xff;
break;
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
}
/* store it */
else
return VINF_SUCCESS;
}
/**
* Common worker for writing a CRx value — presumably shared by
* 'mov CRx,GReg', 'lmsw' and 'clts' (see callers below); confirm against the
* full source.
* @param iCrReg The CRx register to write (valid).
* @param uNewCrX The new value.
*/
{
int rc;
/*
* Try store it.
* Unfortunately, CPUM only does a tiny bit of the work.
*/
switch (iCrReg)
{
case 0:
{
/*
* Perform checks.
*/
/* Check for reserved bits. */
{
Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* Check for invalid combinations. */
if ( (uNewCrX & X86_CR0_PG)
&& !(uNewCrX & X86_CR0_PE) )
{
Log(("Trying to set CR0.PG without CR0.PE\n"));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
if ( !(uNewCrX & X86_CR0_CD)
&& (uNewCrX & X86_CR0_NW) )
{
Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* Long mode consistency checks. */
if ( (uNewCrX & X86_CR0_PG)
&& !(uOldCrX & X86_CR0_PG)
{
{
Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
{
Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
}
/** @todo check reserved PDPTR bits as AMD states. */
/*
* Change CR0.
*/
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
{
}
else
/*
* Change EFER.LMA if entering or leaving long mode.
*/
{
if (uNewCrX & X86_CR0_PG)
else
NewEFER &= ~MSR_K6_EFER_LME;
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
}
/*
* Inform PGM.
*/
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
{
{
/* ignore informational status codes */
}
/** @todo Status code management. */
}
else
break;
}
/*
* CR2 can be changed without any restrictions.
*/
case 2:
break;
/*
* CR3 is relatively simple, although AMD and Intel have different
* accounts of how setting reserved bits are handled. We take intel's
* word for the lower bits and AMD's for the high bits (63:52).
*/
/** @todo Testcase: Setting reserved bits in CR3, especially before
* enabling paging. */
case 3:
{
/* check / mask the value. */
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
else
{
Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
}
/** @todo If we're in PAE mode we should check the PDPTRs for
* invalid bits. */
/* Make the change. */
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
{
}
else
/* Inform PGM. */
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
{
{
/* ignore informational status codes */
/** @todo status code management */
}
}
break;
}
/*
* CR4 is a bit more tedious as there are bits which cannot be cleared
* under some circumstances and such.
*/
case 4:
{
/* reserved bits */
//if (xxx)
// fValid |= X86_CR4_VMXE;
//if (xxx)
// fValid |= X86_CR4_OSXSAVE;
{
Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* long mode checks. */
if ( (uOldCrX & X86_CR4_PAE)
&& !(uNewCrX & X86_CR4_PAE)
{
Log(("Trying to set clear CR4.PAE while long mode is active\n"));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Change it.
*/
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
{
}
else
/*
* Notify SELM and PGM.
*/
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
{
/* SELM - VME may change things wrt to the TSS shadowing. */
/* PGM - flushing and mode. */
{
/* ignore informational status codes */
}
/** @todo Status code management. */
}
else
break;
}
/*
* CR8 maps to the APIC TPR.
*/
case 8:
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
break;
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
}
/*
* Advance the RIP on success.
*/
/** @todo Status code management. */
if (rcStrict == VINF_SUCCESS)
return rcStrict;
}
/**
* Implements mov CRx,GReg.
*
* @param iCrReg The CRx register to write (valid).
* @param iGReg The general register to load the DRx value from.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
/*
* Read the new value from the source register and call common worker.
*/
else
}
/**
* Implements lmsw — loads the low 16 bits of CR0 from the operand.
*
* @param u16NewMsw The new machine status word value.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
/*
* Compose the new CR0 value and call common worker.
*/
}
/**
* Implements 'CLTS'.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
uNewCr0 &= ~X86_CR0_TS;
}
/**
* Implements mov GReg,DRx.
*
* @param iGReg The general register to store the DRx value in.
* @param iDrReg The DRx register to read (0-7).
*/
{
/*
* Check preconditions.
*/
/* Raise GPs. */
return iemRaiseGeneralProtectionFault0(pIemCpu);
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* Raise #DB if general access detect is enabled. */
{
return iemRaiseDebugException(pIemCpu);
}
/*
* Read the debug register and store it in the specified general register.
*/
switch (iDrReg)
{
case 6:
case 4:
break;
case 7:
case 5:
break;
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
}
else
return VINF_SUCCESS;
}
/**
* Implements mov DRx,GReg.
*
* @param iDrReg The DRx register to write (valid).
* @param iGReg The general register to load the DRx value from.
*/
{
/*
* Check preconditions.
*/
return iemRaiseGeneralProtectionFault0(pIemCpu);
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* Raise #DB if general access detect is enabled. */
* \#GP? */
{
return iemRaiseDebugException(pIemCpu);
}
/*
* Read the new value from the source register.
*/
else
/*
* Adjust it.
*/
switch (iDrReg)
{
case 0:
case 1:
case 2:
case 3:
/* nothing to adjust */
break;
case 6:
case 4:
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
break;
case 7:
case 5:
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
break;
}
/*
* Do the actual setting.
*/
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
{
}
else
return VINF_SUCCESS;
}
/**
* Implements RDTSC.
*/
{
/*
* Check preconditions.
*/
return iemRaiseUndefinedOpcode(pIemCpu);
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Do the job.
*/
#ifdef IEM_VERIFICATION_MODE
pIemCpu->fIgnoreRaxRdx = true;
#endif
return VINF_SUCCESS;
}
/**
* Implements 'IN eAX, port'.
*
* @param u16Port The source port.
* @param cbReg The register size.
*/
{
/*
* CPL check
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* Perform the I/O.
*/
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
if (IOM_SUCCESS(rcStrict))
{
switch (cbReg)
{
default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
}
}
/** @todo massage rcStrict. */
return rcStrict;
}
/**
* Implements 'IN eAX, DX'.
*
* @param cbReg The register size.
*/
{
}
/**
* Implements 'OUT port, eAX'.
*
* @param u16Port The destination port.
* @param cbReg The register size.
*/
{
/*
* CPL check
*/
{
/** @todo I/O port permission bitmap check */
}
/*
* Perform the I/O.
*/
switch (cbReg)
{
default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
}
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
if (IOM_SUCCESS(rc))
{
/** @todo massage rc. */
}
return rc;
}
/**
* Implements 'OUT DX, eAX'.
*
* @param cbReg The register size.
*/
{
}
/**
* Implements 'CLI'.
*/
{
{
{
else
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* V8086 */
else if (uIopl == 3)
else if ( uIopl < 3
else
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* real mode */
else
return VINF_SUCCESS;
}
/**
* Implements 'STI'.
*/
{
{
{
else
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* V8086 */
else if (uIopl == 3)
else if ( uIopl < 3
else
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* real mode */
else
return VINF_SUCCESS;
}
/**
* Implements 'HLT'.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
return VINF_EM_HALT;
}
/**
* Implements 'CPUID'.
*/
{
CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
return VINF_SUCCESS;
}
/**
* Implements 'AAD'.
*
* @param enmEffOpSize The effective operand size.
*/
{
return VINF_SUCCESS;
}
/**
* Implements 'AAM'.
*
* @param bImm The immediate operand. Cannot be 0.
*/
{
return VINF_SUCCESS;
}
/*
* Instantiate the various string operation combinations.
*/
#define OP_SIZE 8
#define ADDR_SIZE 16
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 8
#define ADDR_SIZE 32
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 8
#define ADDR_SIZE 64
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 16
#define ADDR_SIZE 16
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 16
#define ADDR_SIZE 32
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 16
#define ADDR_SIZE 64
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 32
#define ADDR_SIZE 16
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 32
#define ADDR_SIZE 32
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 32
#define ADDR_SIZE 64
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 64
#define ADDR_SIZE 32
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 64
#define ADDR_SIZE 64
#include "IEMAllCImplStrInstr.cpp.h"
/**
* Implements 'FINIT' and 'FNINIT'.
*
* @param fCheckXcpts Whether to check for umasked pending exceptions or
* not.
*/
{
return iemRaiseDeviceNotAvailable(pIemCpu);
if (fCheckXcpts && TODO )
return iemRaiseMathFault(pIemCpu);
*/
if (iemFRegIsFxSaveFormat(pIemCpu))
{
}
else
{
}
return VINF_SUCCESS;
}
/** @} */