/* $Id$ */
/** @file
* IEM - Instruction Implementation in C/C++ (code include).
*/
/*
* Copyright (C) 2011-2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/** @name Misc Helpers
* @{
*/
/**
* Worker function for iemHlpCheckPortIOPermission, don't call directly.
*
* @returns Strict VBox status code.
*
* @param pIemCpu The IEM per CPU data.
* @param pCtx The register context.
* @param u16Port The port number.
* @param cbOperand The operand size.
*/
static VBOXSTRICTRC iemHlpCheckPortIOPermissionBitmap(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
{
/* The TSS bits we're interested in are the same on 386 and AMD64. */
/*
* Check the TSS type; 16-bit TSSes don't have any I/O permission bitmap.
*/
{
Log(("iemHlpCheckPortIOPermissionBitmap: Port=%#x cb=%d - TSS type %#x (attr=%#x) has no I/O bitmap -> #GP(0)\n",
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Read the bitmap offset (may #PF).
*/
if (rcStrict != VINF_SUCCESS)
{
Log(("iemHlpCheckPortIOPermissionBitmap: Error reading offIoBitmap (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
return rcStrict;
}
/*
* Check the bit range from u16Port to (u16Port + cbOperand - 1). However, Intel
* describes the CPU as actually reading two bytes regardless of whether the
* bit range crosses a byte boundary. Thus the + 1 in the test below.
*/
/** @todo check if real CPUs ensure that offBitmap has a minimum value of,
* for instance, sizeof(X86TSS32). */
{
Log(("iemHlpCheckPortIOPermissionBitmap: offFirstBit=%#x + 1 is beyond u32Limit=%#x -> #GP(0)\n",
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Read the necessary bits.
*/
/** @todo Test the assertion in the Intel manual that the CPU reads two
* bytes. The question is how this works w.r.t. #PF and #GP on the
* 2nd byte when it's not required. */
if (rcStrict != VINF_SUCCESS)
{
Log(("iemHlpCheckPortIOPermissionBitmap: Error reading I/O bitmap @%#x (%Rrc)\n", offFirstBit, VBOXSTRICTRC_VAL(rcStrict)));
return rcStrict;
}
/*
* Perform the check.
*/
{
Log(("iemHlpCheckPortIOPermissionBitmap: u16Port=%#x LB %u - access denied (bm=%#x mask=%#x) -> #GP(0)\n",
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
return VINF_SUCCESS;
}
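#if 0 /* Illustrative sketch, not the IEM code above: how the two bytes fetched from the
       * TSS I/O permission bitmap are tested for a given port and operand size.  The
       * helper name and signature are hypothetical. */
static bool sketchIoBitmapDeniesAccess(uint16_t u16TwoBitmapBytes, uint16_t u16Port, uint8_t cbOperand)
{
    /* The two bytes read at offIoBitmap + (u16Port / 8) cover bits u16Port..u16Port+15. */
    uint16_t const fBits = u16TwoBitmapBytes >> (u16Port & 7);   /* first port bit -> bit 0 */
    uint16_t const fMask = (uint16_t)((1U << cbOperand) - 1);    /* 0x1, 0x3 or 0xf */
    return (fBits & fMask) != 0;                                 /* any set bit => deny, #GP(0) */
}
#endif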
/**
* Checks if we are allowed to access the given I/O port, raising the
* appropriate exceptions if we aren't (or if the I/O bitmap is not
* accessible).
*
* @returns Strict VBox status code.
*
* @param pIemCpu The IEM per CPU data.
* @param pCtx The register context.
* @param u16Port The port number.
* @param cbOperand The operand size.
*/
DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
{
return VINF_SUCCESS;
}
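#if 0 /* Illustrative sketch, not the IEM code: the architectural rule that decides
       * whether the I/O permission bitmap has to be consulted at all.  The name and
       * signature are hypothetical. */
static bool sketchMustConsultIoBitmap(uint8_t uCpl, uint8_t uIopl, bool fV86Mode)
{
    /* CPL <= IOPL allows the access outright in protected mode; V8086 mode
       always goes to the TSS bitmap. */
    return fV86Mode || uCpl > uIopl;
}
#endif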
#if 0
/**
* Calculates the parity bit.
*
* @returns true if the bit is set, false if not.
* @param u8Result The least significant byte of the result.
*/
{
/*
* Parity is set if the number of bits in the least significant byte of
* the result is even.
*/
uint8_t cBits = 0;
cBits += u8Result & 1; u8Result >>= 1;
cBits += u8Result & 1; u8Result >>= 1;
cBits += u8Result & 1; u8Result >>= 1;
cBits += u8Result & 1; u8Result >>= 1;
cBits += u8Result & 1; u8Result >>= 1;
cBits += u8Result & 1; u8Result >>= 1;
cBits += u8Result & 1; u8Result >>= 1;
cBits += u8Result & 1;
return !(cBits & 1);
}
#endif /* not used */
/**
* Updates the specified flags according to an 8-bit result.
*
* @param pIemCpu The IEM state of the calling EMT.
* @param u8Result The result to set the flags according to.
* @param fToUpdate The flags to update.
* @param fUndefined The flags that are specified as undefined.
*/
static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
{
#ifdef IEM_VERIFICATION_MODE_FULL
#endif
}
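#if 0 /* Illustrative sketch, not the IEM helper above: how ZF, SF and PF are derived
       * from an 8-bit result.  Bit positions are the architectural EFLAGS ones; the
       * helper name is hypothetical. */
static uint32_t sketchArithEFlagsForU8(uint8_t u8Result)
{
    uint32_t fEfl = 0;
    if (!u8Result)
        fEfl |= UINT32_C(1) << 6;                   /* ZF */
    if (u8Result & 0x80)
        fEfl |= UINT32_C(1) << 7;                   /* SF */
    uint8_t cBits = 0;
    for (uint8_t bTmp = u8Result; bTmp; bTmp >>= 1)
        cBits += bTmp & 1;
    if (!(cBits & 1))
        fEfl |= UINT32_C(1) << 2;                   /* PF - even number of set bits */
    return fEfl;
}
#endif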
/**
* Helper used by iret.
*
* @param uCpl The new CPL.
* @param pSReg Pointer to the segment register.
*/
{
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
#else
#endif
}
/**
* Indicates that we have modified the FPU state.
*
* @param pIemCpu The IEM state of the calling EMT.
*/
{
}
/** @} */
/** @name C Implementations
* @{
*/
/**
* Implements a 16-bit popa.
*/
{
/*
* The docs are a bit hard to comprehend here, but it looks like we wrap
* around in real mode as long as none of the individual pops crosses the
* end of the stack segment. In protected mode we check the whole access
* in one go. For efficiency, only do the word-by-word thing if we're in
* danger of wrapping around.
*/
/** @todo do popa boundary / wrap-around checks. */
{
/* word-by-word */
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
{
}
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
{
}
}
else
{
if (rcStrict == VINF_SUCCESS)
{
/* skip sp */
if (rcStrict == VINF_SUCCESS)
{
}
}
}
return rcStrict;
}
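#if 0 /* Illustrative sketch of the "danger of wrapping" idea discussed above: in real
       * mode SP wraps at 64K, so a 16-byte POPA frame that would run past offset 0xffff
       * has to be read word by word.  Hypothetical helper. */
static bool sketchPopaMayWrap(uint16_t uSp)
{
    return (uint32_t)uSp + 15 > UINT16_MAX;     /* last byte of the 8-word frame past 0xffff */
}
#endif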
/**
* Implements a 32-bit popa.
*/
{
/*
* The docs are a bit hard to comprehend here, but it looks like we wrap
* around in real mode as long as none of the individual pops crosses the
* end of the stack segment. In protected mode we check the whole access
* in one go. For efficiency, only do the word-by-word thing if we're in
* danger of wrapping around.
*/
/** @todo do popa boundary / wrap-around checks. */
{
/* word-by-word */
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
{
}
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
{
#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
#endif
}
}
else
{
if (rcStrict == VINF_SUCCESS)
{
/* skip esp */
if (rcStrict == VINF_SUCCESS)
{
}
}
}
return rcStrict;
}
/**
* Implements a 16-bit pusha.
*/
{
/*
* The docs are a bit hard to comprehend here, but it looks like we wrap
* around in real mode as long as none of the individual pushes crosses the
* end of the stack segment. In protected mode we check the whole access
* in one go. For efficiency, only do the word-by-word thing if we're in
* danger of wrapping around.
*/
/** @todo do pusha boundary / wrap-around checks. */
&& IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
{
/* word-by-word */
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
{
}
}
else
{
GCPtrBottom--;
if (rcStrict == VINF_SUCCESS)
{
if (rcStrict == VINF_SUCCESS)
{
}
}
}
return rcStrict;
}
/**
* Implements a 32-bit pusha.
*/
{
/*
* The docs are a bit hard to comprehend here, but it looks like we wrap
* around in real mode as long as none of the individual pushes crosses the
* end of the stack segment. In protected mode we check the whole access
* in one go. For efficiency, only do the word-by-word thing if we're in
* danger of wrapping around.
*/
/** @todo do pusha boundary / wrap-around checks. */
&& IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
{
/* word-by-word */
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
if (rcStrict == VINF_SUCCESS)
{
}
}
else
{
GCPtrBottom--;
if (rcStrict == VINF_SUCCESS)
{
if (rcStrict == VINF_SUCCESS)
{
}
}
}
return rcStrict;
}
/**
* Implements pushf.
*
*
* @param enmEffOpSize The effective operand size.
*/
{
/*
* If we're in V8086 mode some care is required (which is why we're
* doing this in a C implementation).
*/
if ( (fEfl & X86_EFL_VM)
{
if ( enmEffOpSize != IEMMODE_16BIT
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Ok, clear RF and VM and push the flags.
*/
switch (enmEffOpSize)
{
case IEMMODE_16BIT:
break;
case IEMMODE_32BIT:
break;
case IEMMODE_64BIT:
break;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
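#if 0 /* Illustrative sketch of the value PUSHF actually stores: RF and VM are always
       * cleared in the pushed image (the "clear RF and VM" step above).  EFLAGS bit
       * positions are architectural; the helper is hypothetical. */
static uint32_t sketchPushfImage(uint32_t fEfl)
{
    uint32_t const fEflRF = UINT32_C(1) << 16;
    uint32_t const fEflVM = UINT32_C(1) << 17;
    return fEfl & ~(fEflRF | fEflVM);
}
#endif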
/**
* Implements popf.
*
* @param enmEffOpSize The effective operand size.
*/
{
/*
* V8086 is special as usual.
*/
if (fEflOld & X86_EFL_VM)
{
/*
* Almost anything goes if IOPL is 3.
*/
{
switch (enmEffOpSize)
{
case IEMMODE_16BIT:
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
break;
}
case IEMMODE_32BIT:
if (rcStrict != VINF_SUCCESS)
return rcStrict;
break;
}
}
/*
* Interrupt flag virtualization with CR4.VME=1.
*/
else if ( enmEffOpSize == IEMMODE_16BIT
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
* or before? */
if ( ( (u16Value & X86_EFL_IF)
&& (fEflOld & X86_EFL_VIP))
|| (u16Value & X86_EFL_TF) )
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
else
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Not in V8086 mode.
*/
else
{
/* Pop the flags. */
switch (enmEffOpSize)
{
case IEMMODE_16BIT:
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
break;
}
case IEMMODE_32BIT:
if (rcStrict != VINF_SUCCESS)
return rcStrict;
break;
case IEMMODE_64BIT:
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
break;
}
}
/* Merge them with the current flags. */
{
}
{
}
else
{
}
}
/*
* Commit the flags.
*/
return VINF_SUCCESS;
}
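#if 0 /* Illustrative sketch of the privilege sensitive part of the POPF flag merge done
       * above (reserved bits, VM, RF and VIF/VIP handling omitted): IOPL may only change
       * at CPL 0, IF only when CPL <= IOPL.  Hypothetical helper, architectural bits. */
static uint32_t sketchPopfMerge(uint32_t fEflOld, uint32_t fEflPopped, uint8_t uCpl)
{
    uint32_t const fEflIF   = UINT32_C(1) << 9;
    uint32_t const fEflIOPL = UINT32_C(3) << 12;
    uint8_t  const uIopl    = (uint8_t)((fEflOld & fEflIOPL) >> 12);
    uint32_t       fChange  = ~UINT32_C(0);
    if (uCpl != 0)
        fChange &= ~fEflIOPL;                   /* IOPL sticks unless ring 0 */
    if (uCpl > uIopl)
        fChange &= ~fEflIF;                     /* IF sticks unless CPL <= IOPL */
    return (fEflOld & ~fChange) | (fEflPopped & fChange);
}
#endif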
/**
* Implements an indirect call.
*
* @param uNewPC The new program counter (RIP) value (loaded from the
* operand).
* @param enmEffOpSize The effective operand size.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
/**
* Implements a 16-bit relative call.
*
* @param offDisp The displacement offset.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
/**
* Implements a 32-bit indirect call.
*
* @param uNewPC The new program counter (RIP) value (loaded from the
* operand).
* @param enmEffOpSize The effective operand size.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* CASM hook for recording interesting indirect calls.
*/
{
if ( enmState == EMSTATE_IEM_THEN_REM
|| enmState == EMSTATE_IEM
|| enmState == EMSTATE_REM)
}
#endif
return VINF_SUCCESS;
}
/**
* Implements a 32-bit relative call.
*
* @param offDisp The displacement offset.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
/**
* Implements a 64-bit indirect call.
*
* @param uNewPC The new program counter (RIP) value (loaded from the
* operand).
* @param enmEffOpSize The effective operand size.
*/
{
if (!IEM_IS_CANONICAL(uNewPC))
return iemRaiseGeneralProtectionFault0(pIemCpu);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
/**
* Implements a 64-bit relative call.
*
* @param offDisp The displacement offset.
*/
{
if (!IEM_IS_CANONICAL(uNewPC))
return iemRaiseNotCanonical(pIemCpu);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
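#if 0 /* Illustrative sketch of what the canonical test used above amounts to for 48-bit
       * linear addressing: bits 63:47 must all equal bit 47.  Hypothetical helper. */
static bool sketchIsCanonical(uint64_t uAddr)
{
    uint64_t const fHighBits = uAddr & UINT64_C(0xffff800000000000);
    return fHighBits == 0 || fHighBits == UINT64_C(0xffff800000000000);
}
#endif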
/**
* Implements far jumps and calls thru task segments (TSS).
*
* @param uSel The selector.
* @param enmBranch The kind of branching we're performing.
* @param enmEffOpSize The effective operand size.
* @param pDesc The descriptor corresponding to @a uSel. The type is
* task segment.
*/
IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
{
#ifndef IEM_IMPLEMENTS_TASKSWITCH
#else
{
Log(("BranchTaskSegment invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
}
/** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
* far calls (see iemCImpl_callf). Most likely in both cases it should be
* checked here, need testcases. */
{
}
return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
#endif
}
/**
* Implements far jumps and calls thru task gates.
*
* @param uSel The selector.
* @param enmBranch The kind of branching we're performing.
* @param enmEffOpSize The effective operand size.
* @param pDesc The descriptor corresponding to @a uSel. The type is
* task gate.
*/
IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
{
#ifndef IEM_IMPLEMENTS_TASKSWITCH
#else
{
Log(("BranchTaskGate invalid priv. uSel=%04x TSS DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
}
/** @todo This is checked earlier for far jumps (see iemCImpl_FarJmp) but not
* far calls (see iemCImpl_callf). Most likely in both cases it should be
* checked here, need testcases. */
{
}
/*
* Fetch the new TSS descriptor from the GDT.
*/
if (uSelTss & X86_SEL_LDT)
{
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
{
}
{
}
return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), enmBranch == IEMBRANCH_JUMP ? IEMTASKSWITCH_JUMP : IEMTASKSWITCH_CALL,
#endif
}
/**
* Implements far jumps and calls thru call gates.
*
* @param uSel The selector.
* @param enmBranch The kind of branching we're performing.
* @param enmEffOpSize The effective operand size.
* @param pDesc The descriptor corresponding to @a uSel. The type is
* call gate.
*/
IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
{
#ifndef IEM_IMPLEMENTS_CALLGATE
#else
/* NB: Far jumps can only do intra-privilege transfers. Far calls support
* inter-privilege calls and are much more complex.
*
* NB: 64-bit call gate has the same type as a 32-bit call gate! If
* EFER.LMA=1, the gate must be 64-bit. Conversely if EFER.LMA=0, the gate
* must be 16-bit or 32-bit.
*/
/** @todo: effective operand size is probably irrelevant here, only the
* call gate bitness matters??
*/
/* Determine the new instruction pointer from the gate descriptor. */
/* Perform DPL checks on the gate descriptor. */
{
Log(("BranchCallGate invalid priv. uSel=%04x Gate DPL=%d CPL=%u Sel RPL=%u -> #GP\n", uSel, pDesc->Legacy.Gate.u2Dpl,
}
/** @todo does this catch NULL selectors, too? */
{
}
/*
* Fetch the target CS descriptor from the GDT or LDT.
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Target CS must be a code selector. */
{
Log(("BranchCallGate %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
}
/* Privilege checks on target CS. */
if (enmBranch == IEMBRANCH_JUMP)
{
{
{
Log(("BranchCallGate jump (conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
}
}
else
{
{
Log(("BranchCallGate jump (non-conforming) bad DPL uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
}
}
}
else
{
{
Log(("BranchCallGate call invalid priv. uNewCS=%04x Gate DPL=%d CPL=%u -> #GP\n",
}
}
/* Additional long mode checks. */
if (IEM_IS_LONG_MODE(pIemCpu))
{
{
}
/* L vs D. */
{
}
}
{
}
if (enmBranch == IEMBRANCH_JUMP)
{
/** @todo: This is very similar to regular far jumps; merge! */
/* Jumps are fairly simple... */
/* Chop the high bits off if 16-bit gate (Intel says so). */
/* Limit check for non-long segments. */
u64Base = 0;
else
{
{
Log(("BranchCallGate jump %04x:%08RX64 -> out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewRip, cbLimit));
return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
}
}
/* Canonical address check. */
if (!IEM_IS_CANONICAL(uNewRip))
{
return iemRaiseNotCanonical(pIemCpu);
}
/*
* Ok, everything checked out fine. Now set the accessed bit before
* committing the result into CS, CSHID and RIP.
*/
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/** @todo check what VT-x and AMD-V does. */
}
/* commit */
}
else
{
/* Calls are much more complicated. */
{
/* More privilege. This is the fun part. */
/*
* Determine new SS:rSP from the TSS.
*/
/* Figure out where the new stack pointer is stored in the TSS. */
if (!IEM_IS_LONG_MODE(pIemCpu))
{
{
}
else
{
}
}
else
{
}
/* Check against TSS limit. */
{
Log(("BranchCallGate inner stack past TSS limit - %u > %u -> #TS(TSS)\n", offNewStack + cbNewStack - 1, pCtx->tr.u32Limit));
}
if (rcStrict != VINF_SUCCESS)
{
return rcStrict;
}
if (!IEM_IS_LONG_MODE(pIemCpu))
{
{
}
else
{
}
}
else
{
/* SS will be a NULL selector, but that's valid. */
}
/* Done with the TSS now. */
if (rcStrict != VINF_SUCCESS)
{
return rcStrict;
}
/* Only used outside of long mode. */
/* If EFER.LMA is 0, there's extra work to do. */
if (!IEM_IS_LONG_MODE(pIemCpu))
{
if ((uNewSS & X86_SEL_MASK_OFF_RPL) == 0)
{
Log(("BranchCallGate new SS NULL -> #TS(NewSS)\n"));
}
/* Grab the new SS descriptor. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Ensure that CS.DPL == SS.RPL == SS.DPL. */
{
}
/* Ensure new SS is a writable data segment. */
{
Log(("BranchCallGate call new SS -> not a writable data selector (u4Type=%#x)\n", DescSS.Legacy.Gen.u4Type));
}
{
}
else
}
else
{
/* Just grab the new (NULL) SS descriptor. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
/** @todo: According to Intel, new stack is checked for enough space first,
* then switched. According to AMD, the stack is switched first and
* then pushes might fault!
*/
/** @todo: According to AMD, CS is loaded first, then SS.
* According to Intel, it's the other way around!?
*/
/** @todo: Intel and AMD disagree on when exactly the CPL changes! */
/* Set the accessed bit before committing new SS. */
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
/* Remember the old SS:rSP and their linear address. */
/* Commit new SS:rSP. */
/* Check new stack - may #SS(NewSS). */
if (rcStrict != VINF_SUCCESS)
{
return rcStrict;
}
if (!IEM_IS_LONG_MODE(pIemCpu))
{
{
/* Push the old CS:rIP. */
uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
/* Map the relevant chunk of the old stack. */
rcStrict = iemMemMap(pIemCpu, &uPtrParmWds.pv, cbWords * 4, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
if (rcStrict != VINF_SUCCESS)
{
return rcStrict;
}
/* Copy the parameter (d)words. */
for (int i = 0; i < cbWords; ++i)
/* Unmap the old stack. */
if (rcStrict != VINF_SUCCESS)
{
return rcStrict;
}
/* Push the old SS:rSP. */
}
else
{
/* Push the old CS:rIP. */
/* Map the relevant chunk of the old stack. */
rcStrict = iemMemMap(pIemCpu, &uPtrParmWds.pv, cbWords * 2, UINT8_MAX, GCPtrParmWds, IEM_ACCESS_DATA_R);
if (rcStrict != VINF_SUCCESS)
{
return rcStrict;
}
/* Copy the parameter words. */
for (int i = 0; i < cbWords; ++i)
/* Unmap the old stack. */
if (rcStrict != VINF_SUCCESS)
{
return rcStrict;
}
/* Push the old SS:rSP. */
}
}
else
{
/* For 64-bit gates, no parameters are copied. Just push old SS:rSP and CS:rIP. */
uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
}
if (rcStrict != VINF_SUCCESS)
{
return rcStrict;
}
/* Chop the high bits off if 16-bit gate (Intel says so). */
/* Limit / canonical check. */
if (!IEM_IS_LONG_MODE(pIemCpu))
{
{
return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
}
}
else
{
if (!IEM_IS_CANONICAL(uNewRip))
{
return iemRaiseNotCanonical(pIemCpu);
}
u64Base = 0;
}
/*
* Now set the accessed bit before
* writing the return address to the stack and committing the result into
* CS, CSHID and RIP.
*/
/** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/** @todo check what VT-x and AMD-V does. */
}
/* Commit new CS:rIP. */
}
else
{
/* Same privilege. */
/** @todo: This is very similar to regular far calls; merge! */
/* Check stack first - may #SS(0). */
/** @todo check how gate size affects pushing of CS! Does callf 16:32 in
* 16-bit code cause a two or four byte CS to be pushed? */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Chop the high bits off if 16-bit gate (Intel says so). */
/* Limit / canonical check. */
if (!IEM_IS_LONG_MODE(pIemCpu))
{
{
return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, 0);
}
}
else
{
if (!IEM_IS_CANONICAL(uNewRip))
{
return iemRaiseNotCanonical(pIemCpu);
}
u64Base = 0;
}
/*
* Now set the accessed bit before
* writing the return address to the stack and committing the result into
* CS, CSHID and RIP.
*/
/** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/** @todo check what VT-x and AMD-V does. */
}
/* stack */
if (!IEM_IS_LONG_MODE(pIemCpu))
{
{
uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when pushing CS? */
}
else
{
}
}
else
{
uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when pushing CS? */
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* commit */
}
}
return VINF_SUCCESS;
#endif
}
/**
* Implements far jumps and calls thru system selectors.
*
* @param uSel The selector.
* @param enmBranch The kind of branching we're performing.
* @param enmEffOpSize The effective operand size.
* @param pDesc The descriptor corresponding to @a uSel.
*/
IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
{
if (IEM_IS_LONG_MODE(pIemCpu))
{
default:
case AMD64_SEL_TYPE_SYS_LDT:
}
{
default:
case X86_SEL_TYPE_SYS_LDT:
}
}
/**
* Implements far jumps.
*
* @param uSel The selector.
* @param offSeg The segment offset.
* @param enmEffOpSize The effective operand size.
*/
{
/*
* Real mode and V8086 mode are easy. The only snag seems to be that
* CS.limit doesn't change and the limit check is done against the current
* limit.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
else
return VINF_SUCCESS;
}
/*
* Protected mode. Need to parse the specified descriptor...
*/
if (!(uSel & X86_SEL_MASK_OFF_RPL))
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* Fetch the descriptor. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Is it there? */
{
}
/*
* Deal with it according to its type. We do the standard code selectors
* here and dispatch the system selectors to worker functions.
*/
/* Only code segments. */
{
Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
}
/* L vs D. */
&& IEM_IS_LONG_MODE(pIemCpu))
{
}
{
{
Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
}
}
else
{
{
Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
}
{
Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
}
}
/* Chop the high bits if 16-bit (Intel says so). */
if (enmEffOpSize == IEMMODE_16BIT)
offSeg &= UINT16_MAX;
/* Limit check. (Should alternatively check for non-canonical addresses
here, but that is ruled out by offSeg being 32-bit, right?) */
u64Base = 0;
else
{
{
/** @todo: Intel says this is #GP(0)! */
}
}
/*
* Ok, everything checked out fine. Now set the accessed bit before
* committing the result into CS, CSHID and RIP.
*/
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/** @todo check what VT-x and AMD-V does. */
}
/* commit */
/** @todo check if the hidden bits are loaded correctly for 64-bit
* mode. */
return VINF_SUCCESS;
}
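#if 0 /* Illustrative sketch of the privilege rule the far-jump checks above enforce for
       * code segments: conforming requires DPL <= CPL, non-conforming requires
       * RPL <= CPL and DPL == CPL.  Hypothetical helper. */
static bool sketchFarJmpPrivOk(bool fConforming, uint8_t uDpl, uint8_t uRpl, uint8_t uCpl)
{
    if (fConforming)
        return uDpl <= uCpl;
    return uRpl <= uCpl && uDpl == uCpl;
}
#endif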
/**
* Implements far calls.
*
* This is very similar to iemCImpl_FarJmp.
*
* @param uSel The selector.
* @param offSeg The segment offset.
* @param enmEffOpSize The operand size (in case we need it).
*/
{
/*
* Real mode and V8086 mode are easy. The only snag seems to be that
* CS.limit doesn't change and the limit check is done against the current
* limit.
*/
{
/* Check stack first - may #SS(0). */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Check the target address range. */
if (offSeg > UINT32_MAX)
return iemRaiseGeneralProtectionFault0(pIemCpu);
/* Everything is fine, push the return address. */
if (enmEffOpSize == IEMMODE_16BIT)
{
}
else
{
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Branch. */
return VINF_SUCCESS;
}
/*
* Protected mode. Need to parse the specified descriptor...
*/
if (!(uSel & X86_SEL_MASK_OFF_RPL))
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* Fetch the descriptor. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* Deal with it according to its type. We do the standard code selectors
* here and dispatch the system selectors to worker functions.
*/
/* Only code segments. */
{
Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
}
/* L vs D. */
&& IEM_IS_LONG_MODE(pIemCpu))
{
}
{
{
Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
}
}
else
{
{
Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
}
{
Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
}
}
/* Is it there? */
{
}
/* Check stack first - may #SS(0). */
/** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
* 16-bit code cause a two or four byte CS to be pushed? */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Chop the high bits if 16-bit (Intel says so). */
if (enmEffOpSize == IEMMODE_16BIT)
offSeg &= UINT16_MAX;
/* Limit / canonical check. */
{
if (!IEM_IS_CANONICAL(offSeg))
{
return iemRaiseNotCanonical(pIemCpu);
}
u64Base = 0;
}
else
{
{
/** @todo: Intel says this is #GP(0)! */
}
}
/*
* Now set the accessed bit before
* writing the return address to the stack and committing the result into
* CS, CSHID and RIP.
*/
/** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/** @todo check what VT-x and AMD-V does. */
}
/* stack */
if (enmEffOpSize == IEMMODE_16BIT)
{
}
else if (enmEffOpSize == IEMMODE_32BIT)
{
uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
}
else
{
uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* commit */
/** @todo check if the hidden bits are loaded correctly for 64-bit
* mode. */
return VINF_SUCCESS;
}
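#if 0 /* Illustrative sketch: the CS:IP return frame pushed by callf above occupies two
       * operand-size slots; what lands in the high bytes of the CS slot is exactly the
       * open question in the todos above.  Hypothetical helper. */
static uint8_t sketchCallfReturnFrameSize(IEMMODE enmEffOpSize)
{
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT: return 2 * 2;   /* IP + CS as words */
        case IEMMODE_32BIT: return 2 * 4;   /* EIP + CS as dwords */
        default:            return 2 * 8;   /* RIP + CS as qwords (IEMMODE_64BIT) */
    }
}
#endif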
/**
* Implements retf.
*
* @param enmEffOpSize The effective operand size.
* @param cbPop The number of argument bytes to pop from the stack.
*/
{
/*
* Read the stack values first.
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (enmEffOpSize == IEMMODE_16BIT)
{
}
else if (enmEffOpSize == IEMMODE_32BIT)
{
}
else
{
}
/*
* Real mode and V8086 mode are easy.
*/
{
/** @todo check how this is supposed to work if sp=0xfffe. */
/* Check the limit of the new EIP. */
/** @todo Intel pseudo code only does the limit check for 16-bit
* operands, AMD does not make any distinction. What is right? */
/* commit the operation. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/** @todo do we load attribs and limit as well? */
if (cbPop)
return VINF_SUCCESS;
}
/*
* Protected mode is complicated, of course.
*/
if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* Fetch the descriptor. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Can only return to a code selector. */
{
Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
}
/* L vs D. */
if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
&& IEM_IS_LONG_MODE(pIemCpu))
{
}
{
}
{
{
Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
}
}
else
{
{
Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
}
}
/* Is it there? */
{
}
/*
* Return to outer privilege? (We'll typically have entered via a call gate.)
*/
{
/* Read the outer stack pointer stored *after* the parameters. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (enmEffOpSize == IEMMODE_16BIT)
{
}
else if (enmEffOpSize == IEMMODE_32BIT)
{
}
else
{
}
/* Check for NULL stack selector (invalid in ring-3 and non-long mode)
and read the selector. */
if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
{
{
Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
}
else
{
/* Fetch the descriptor for the new stack segment. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
/* Check that RPL of stack and code selectors match. */
{
Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
}
/* Must be a writable data segment. */
{
Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
}
/* L vs D. (Not mentioned by intel.) */
if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
&& IEM_IS_LONG_MODE(pIemCpu))
{
Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
}
{
Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
}
/* Is it there? */
{
Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
}
/* Calc SS limit.*/
/* Is RIP canonical or within CS.limit? */
{
if (!IEM_IS_CANONICAL(uNewRip))
{
Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
return iemRaiseNotCanonical(pIemCpu);
}
u64Base = 0;
}
else
{
{
Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
/** @todo: Intel says this is #GP(0)! */
}
}
/*
* Now set the accessed bit before
* writing the return address to the stack and committing the result into
* CS, CSHID and RIP.
*/
/** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/** @todo check what VT-x and AMD-V does. */
}
/** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/** @todo check what VT-x and AMD-V does. */
}
/* commit */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (enmEffOpSize == IEMMODE_16BIT)
pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
else
else
/** @todo check if the hidden bits are loaded correctly for 64-bit
* mode. */
if (cbPop)
/* Done! */
}
/*
* Return to the same privilege level
*/
else
{
/* Limit / canonical check. */
{
if (!IEM_IS_CANONICAL(uNewRip))
{
return iemRaiseNotCanonical(pIemCpu);
}
u64Base = 0;
}
else
{
{
/** @todo: Intel says this is #GP(0)! */
}
}
/*
* Now set the accessed bit before
* writing the return address to the stack and committing the result into
* CS, CSHID and RIP.
*/
/** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/** @todo check what VT-x and AMD-V does. */
}
/* commit */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (enmEffOpSize == IEMMODE_16BIT)
pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
else
/** @todo check if the hidden bits are loaded correctly for 64-bit
* mode. */
if (cbPop)
}
return VINF_SUCCESS;
}
/**
* Implements retn.
*
* We're doing this in C because of the \#GP that might be raised if the popped
* program counter is out of bounds.
*
* @param enmEffOpSize The effective operand size.
* @param cbPop The number of argument bytes to pop from the stack.
*/
{
/* Fetch the RSP from the stack. */
switch (enmEffOpSize)
{
case IEMMODE_16BIT:
NewRip.u = 0;
break;
case IEMMODE_32BIT:
NewRip.u = 0;
break;
case IEMMODE_64BIT:
break;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Check the new RSP before loading it. */
/** @todo Should test this as the intel+amd pseudo code doesn't mention half
* of it. The canonical test is performed here and for call. */
if (enmEffOpSize != IEMMODE_64BIT)
{
{
}
}
else
{
if (!IEM_IS_CANONICAL(NewRip.u))
{
return iemRaiseNotCanonical(pIemCpu);
}
}
/* Commit it. */
if (cbPop)
return VINF_SUCCESS;
}
/**
* Implements enter.
*
* We're doing this in C because the instruction is insane; even for the
* u8NestingLevel=0 case, dealing with the stack is tedious.
*
* @param enmEffOpSize The effective operand size.
*/
{
/* Push RBP, saving the old value in TmpRbp. */
if (enmEffOpSize == IEMMODE_64BIT)
{
}
{
}
else
{
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Copy the parameters (aka nesting levels by Intel). */
cParameters &= 0x1f;
if (cParameters > 0)
{
switch (enmEffOpSize)
{
case IEMMODE_16BIT:
else
do
{
if (rcStrict != VINF_SUCCESS)
break;
break;
case IEMMODE_32BIT:
else
do
{
if (rcStrict != VINF_SUCCESS)
break;
break;
case IEMMODE_64BIT:
TmpRbp.u -= 8;
do
{
if (rcStrict != VINF_SUCCESS)
break;
break;
}
if (rcStrict != VINF_SUCCESS)
return VINF_SUCCESS;
/* Push the new RBP */
if (enmEffOpSize == IEMMODE_64BIT)
else
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
/* Recalc RSP. */
/** @todo Should probe write access at the new RSP according to AMD. */
/* Commit it. */
return VINF_SUCCESS;
}
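#if 0 /* Illustrative sketch of what ENTER does, in simplified form ignoring faults and
       * segmentation (the "insane" part above).  cbFrame is the first operand, cbParm
       * the operand size in bytes; all names are hypothetical. */
static void sketchEnter(uint64_t *puRsp, uint64_t *puRbp, uint16_t cbFrame, uint8_t cNestingLevel,
                        uint8_t cbParm, void (*pfnPush)(uint64_t *puRsp, uint64_t uValue),
                        uint64_t (*pfnReadAtRbp)(uint64_t uRbp))
{
    pfnPush(puRsp, *puRbp);                         /* push the old rBP */
    uint64_t const uFrameTemp = *puRsp;             /* this becomes the new rBP */
    cNestingLevel &= 0x1f;
    if (cNestingLevel > 0)
    {
        uint64_t uRbpCopy = *puRbp;
        for (uint8_t i = 1; i < cNestingLevel; i++)
        {
            uRbpCopy -= cbParm;
            pfnPush(puRsp, pfnReadAtRbp(uRbpCopy)); /* copy the outer frame pointers */
        }
        pfnPush(puRsp, uFrameTemp);                 /* push the new frame pointer */
    }
    *puRbp  = uFrameTemp;
    *puRsp -= cbFrame;                              /* allocate the local frame */
}
#endif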
/**
* Implements leave.
*
* We're doing this in C because messing with the stack registers is annoying
* since they depend on SS attributes.
*
* @param enmEffOpSize The effective operand size.
*/
{
/* Calculate the intermediate RSP from RBP and the stack attributes. */
else
{
/** @todo Check that LEAVE actually preserves the high EBP bits. */
}
/* Pop RBP according to the operand size. */
switch (enmEffOpSize)
{
case IEMMODE_16BIT:
break;
case IEMMODE_32BIT:
NewRbp.u = 0;
break;
case IEMMODE_64BIT:
break;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Commit it. */
return VINF_SUCCESS;
}
/**
* Implements int3 and int XX.
*
* @param u8Int The interrupt vector number.
* @param fIsBpInstr Whether it is the breakpoint instruction (int3).
*/
{
return iemRaiseXcptOrInt(pIemCpu,
0,
0);
}
/**
* Implements iret for real mode and V8086 mode.
*
* @param enmEffOpSize The effective operand size.
*/
{
/*
* iret throws an exception if VME isn't enabled.
*/
return iemRaiseGeneralProtectionFault0(pIemCpu);
/*
* Do the stack bits, but don't commit RSP before everything checks
* out right.
*/
if (enmEffOpSize == IEMMODE_32BIT)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (uNewEip > UINT16_MAX)
return iemRaiseGeneralProtectionFault0(pIemCpu);
| X86_EFL_ID;
}
else
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/** @todo The intel pseudo code does not indicate what happens to
* reserved flags. We just ignore them. */
}
/** @todo Check how this is supposed to work if sp=0xfffe. */
/*
* Check the limit of the new EIP.
*/
/** @todo Only the AMD pseudo code checks the limit here; what's
* right? */
/*
* V8086 checks and flag adjustments
*/
{
{
/* Preserve IOPL and clear RF. */
}
else if ( enmEffOpSize == IEMMODE_16BIT
&& ( !(uNewFlags & X86_EFL_IF)
&& !(uNewFlags & X86_EFL_TF) )
{
/* Move IF to VIF, clear RF and preserve IF and IOPL.*/
uNewFlags &= ~X86_EFL_VIF;
}
else
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Commit the operation.
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
#ifdef DBGFTRACE_ENABLED
RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
#endif
/** @todo do we load attribs and limit as well? */
return VINF_SUCCESS;
}
/**
* Loads a segment register when entering V8086 mode.
*
* @param pSReg The segment register.
* @param uSeg The segment to load.
*/
{
pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
/** @todo Testcase: Check if VT-x really needs this and what it does itself when
* IRET'ing to V8086. */
}
/**
* Implements iret for protected mode returning to V8086 mode.
*
* @param pCtx Pointer to the CPU context.
* @param uNewEip The new EIP.
* @param uNewCs The new CS.
* @param uNewFlags The new EFLAGS.
* @param uNewRsp The RSP after the initial IRET frame.
*
* @note This can only be a 32-bit iret due to the X86_EFL_VM position.
*/
{
/*
* Pop the V8086 specific frame bits off the stack.
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* Commit the operation.
*/
#ifdef DBGFTRACE_ENABLED
RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
#endif
return VINF_SUCCESS;
}
/**
* Implements iret for protected mode returning via a nested task.
*
* @param enmEffOpSize The effective operand size.
*/
{
#ifndef IEM_IMPLEMENTS_TASKSWITCH
#else
/*
* Read the segment selector in the link-field of the current TSS.
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* Fetch the returning task's TSS descriptor from the GDT.
*/
if (uSelRet & X86_SEL_LDT)
{
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
{
}
{
Log(("iret_prot_NestedTask TSS is not busy. uSelRet=%04x DescType=%#x -> #TS\n", uSelRet, TssDesc.Legacy.Gate.u4Type));
}
{
}
return iemTaskSwitch(pIemCpu, pIemCpu->CTX_SUFF(pCtx), IEMTASKSWITCH_IRET, uNextEip, 0 /* fFlags */, 0 /* uErr */,
#endif
}
/**
* Implements iret for protected mode
*
* @param enmEffOpSize The effective operand size.
*/
{
/*
* Nested task return.
*/
/*
* Normal return.
*
* Do the stack bits, but don't commit RSP before everything checks
* out right.
*/
if (enmEffOpSize == IEMMODE_32BIT)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
else
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* We're hopefully not returning to V8086 mode...
*/
if ( (uNewFlags & X86_EFL_VM)
{
}
/*
* Protected mode.
*/
/* Read the CS descriptor. */
if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
if (rcStrict != VINF_SUCCESS)
{
Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
return rcStrict;
}
/* Must be a code descriptor. */
{
Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
}
{
Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
}
#ifdef VBOX_WITH_RAW_MODE_NOT_R0
/* Raw ring-0 and ring-1 compression adjustments for PATM performance tricks and other CS leaks. */
{
{
&& ( !EMIsRawRing1Enabled(pVM)
{
}
# ifdef LOG_ENABLED
# endif
}
{
Log(("iret: Ring-1 compression fix: uNewCS=%#x -> %#x\n", uNewCs, (uNewCs & X86_SEL_MASK_OFF_RPL) | 1));
}
}
#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
/* Privilege checks. */
{
}
{
}
/* Present? */
{
}
/*
* Return to outer level?
*/
{
if (enmEffOpSize == IEMMODE_32BIT)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
else
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Read the SS descriptor. */
if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
{
Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS, X86_XCPT_GP); /** @todo Correct exception? */
if (rcStrict != VINF_SUCCESS)
{
Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
return rcStrict;
}
/* Privilege checks. */
{
}
{
Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
}
/* Must be a writeable data segment descriptor. */
{
Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
}
{
Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
}
/* Present? */
{
}
/* Check EIP. */
{
Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
/** @todo: Which is it, #GP(0) or #GP(sel)? */
}
/*
* Commit the changes, marking CS and SS accessed first since
* that may fail.
*/
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
if (enmEffOpSize != IEMMODE_16BIT)
fEFlagsNew &= ~fEFlagsMask;
#ifdef DBGFTRACE_ENABLED
RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
#endif
else
/* Done! */
}
/*
* Return to the same level.
*/
else
{
/* Check EIP. */
{
/** @todo: Which is it, #GP(0) or #GP(sel)? */
}
/*
* Commit the changes, marking CS first since it may fail.
*/
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
if (enmEffOpSize != IEMMODE_16BIT)
NewEfl.u &= ~fEFlagsMask;
#ifdef DBGFTRACE_ENABLED
RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
#endif
/* Done! */
}
return VINF_SUCCESS;
}
/**
* Implements iret for long mode
*
* @param enmEffOpSize The effective operand size.
*/
{
/*
* Nested task return is not supported in long mode.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Normal return.
*
* Do the stack bits, but don't commit RSP before everything checks
* out right.
*/
if (enmEffOpSize == IEMMODE_64BIT)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
else if (enmEffOpSize == IEMMODE_32BIT)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
else
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
Log2(("iretq stack: cs:rip=%04x:%016RX16 rflags=%016RX16 ss:rsp=%04x:%016RX16\n",
/*
* Check stuff.
*/
/* Read the CS descriptor. */
if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
{
Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid CS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
if (rcStrict != VINF_SUCCESS)
{
Log(("iret %04x:%016RX64/%04x:%016RX64 - rcStrict=%Rrc when fetching CS\n",
return rcStrict;
}
/* Must be a code descriptor. */
{
Log(("iret %04x:%016RX64/%04x:%016RX64 - CS is not a code segment T=%u T=%#xu -> #GP\n",
}
/* Privilege checks. */
{
Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < CPL (%d) -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp, pIemCpu->uCpl));
}
{
Log(("iret %04x:%016RX64/%04x:%016RX64 - RPL < DPL (%d) -> #GP\n",
}
/* Present? */
{
Log(("iret %04x:%016RX64/%04x:%016RX64 - CS not present -> #NP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
}
/* Read the SS descriptor. */
if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
{
|| DescCS.Legacy.Gen.u1DefBig /** @todo exactly how does iret (and others) behave with u1Long=1 and u1DefBig=1? \#GP(sel)? */
{
Log(("iret %04x:%016RX64/%04x:%016RX64 -> invalid SS selector, #GP(0)\n", uNewCs, uNewRip, uNewSs, uNewRsp));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
}
else
{
rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSs, X86_XCPT_GP); /** @todo Correct exception? */
if (rcStrict != VINF_SUCCESS)
{
Log(("iret %04x:%016RX64/%04x:%016RX64 - %Rrc when fetching SS\n",
return rcStrict;
}
}
/* Privilege checks. */
{
Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewRip, uNewSs, uNewRsp));
}
if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
else
{
{
Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS.DPL (%d) != CS.RPL -> #GP\n",
}
/* Must be a writeable data segment descriptor. */
{
Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS is system segment (%#x) -> #GP\n",
}
{
Log(("iret %04x:%016RX64/%04x:%016RX64 - not writable data segment (%#x) -> #GP\n",
}
/* Present? */
{
Log(("iret %04x:%016RX64/%04x:%016RX64 -> SS not present -> #SS\n", uNewCs, uNewRip, uNewSs, uNewRsp));
}
}
/* Check EIP. */
{
if (!IEM_IS_CANONICAL(uNewRip))
{
Log(("iret %04x:%016RX64/%04x:%016RX64 -> RIP is not canonical -> #GP(0)\n",
}
}
else
{
{
Log(("iret %04x:%016RX64/%04x:%016RX64 -> EIP is out of bounds (%#x) -> #GP(0)\n",
/** @todo: Which is it, #GP(0) or #GP(sel)? */
}
}
/*
* Commit the changes, marking CS and SS accessed first since
* that may fail.
*/
/** @todo where exactly are these actually marked accessed by a real CPU? */
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
if (enmEffOpSize != IEMMODE_16BIT)
fEFlagsNew &= ~fEFlagsMask;
#ifdef DBGFTRACE_ENABLED
RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
#endif
else
if (!(uNewSs & X86_SEL_MASK_OFF_RPL))
{
Log2(("iretq new SS: NULL\n"));
}
else
{
Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u));
}
{
}
return VINF_SUCCESS;
}
/**
* Implements iret.
*
* @param enmEffOpSize The effective operand size.
*/
{
/*
* First, clear NMI blocking, if any, before causing any exceptions.
*/
/*
* Call a mode specific worker.
*/
if (IEM_IS_LONG_MODE(pIemCpu))
}
/**
* Implements SYSCALL (AMD and Intel64).
*
* @param enmEffOpSize The effective operand size.
*/
{
/*
* Check preconditions.
*
* Note that CPUs described in the documentation may load a few odd values
* into CS and SS than we allow here. This has yet to be checked on real
* hardware.
*/
{
Log(("syscall: Not enabled in EFER -> #UD\n"));
return iemRaiseUndefinedOpcode(pIemCpu);
}
{
Log(("syscall: Protected mode is required -> #GP(0)\n"));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
{
Log(("syscall: Only available in long mode on intel -> #UD\n"));
return iemRaiseUndefinedOpcode(pIemCpu);
}
/** @todo verify RPL ignoring and CS=0xfff8 (i.e. SS == 0). */
/** @todo what about LDT selectors? Shouldn't matter, really. */
{
Log(("syscall: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* Long mode and legacy mode differs. */
if (CPUMIsGuestInLongModeEx(pCtx))
{
/* This test isn't in the docs, but I'm not trusting the guys writing
the MSRs to have validated the values as canonical like they should. */
if (!IEM_IS_CANONICAL(uNewRip))
{
Log(("syscall: Only available in long mode on intel -> #UD\n"));
return iemRaiseUndefinedOpcode(pIemCpu);
}
/*
* Commit it.
*/
Log(("syscall: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64\n", pCtx->cs, pCtx->rip, pCtx->rflags.u, uNewCs, uNewRip));
pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
}
else
{
/*
* Commit it.
*/
Log(("syscall: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n",
pCtx->cs, pCtx->eip, pCtx->eflags.u, uNewCs, (uint32_t)(pCtx->msrSTAR & MSR_K6_STAR_SYSCALL_EIP_MASK)));
pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC;
pCtx->ss.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_RW_ACC;
}
return VINF_SUCCESS;
}
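#if 0 /* Illustrative sketch of the STAR selector plumbing used by SYSCALL above: CS
       * comes from STAR[47:32] with RPL forced to 0 and SS is that selector plus 8
       * (SYSRET below uses STAR[63:48], plus 16 for 64-bit returns).  Hypothetical
       * helper. */
static void sketchSyscallSelectors(uint64_t uMsrStar, uint16_t *puNewCs, uint16_t *puNewSs)
{
    *puNewCs = (uint16_t)((uMsrStar >> 32) & 0xfffc);
    *puNewSs = (uint16_t)(*puNewCs + 8);
}
#endif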
/**
* Implements SYSRET (AMD and Intel64).
*/
{
/*
* Check preconditions.
*
* Note that CPUs described in the documentation may load slightly different
* values into CS and SS than we allow here. This has yet to be checked on
* real hardware.
*/
{
Log(("sysret: Not enabled in EFER -> #UD\n"));
return iemRaiseUndefinedOpcode(pIemCpu);
}
{
Log(("sysret: Only available in long mode on intel -> #UD\n"));
return iemRaiseUndefinedOpcode(pIemCpu);
}
{
Log(("sysret: Protected mode is required -> #GP(0)\n"));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/** @todo Does SYSRET verify CS != 0 and SS != 0? Neither is valid in ring-3. */
uNewCs += 16;
{
Log(("sysret: msrSTAR.CS = 0 or SS = 0 -> #GP(0)\n"));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Commit it.
*/
if (CPUMIsGuestInLongModeEx(pCtx))
{
{
Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%016RX64 [r11=%#llx]\n",
/* Note! We disregard the Intel manual regarding the RCX canonical
check; ask Intel+Xen why AMD doesn't do it. */
pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_L | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
| (3 << X86DESCATTR_DPL_SHIFT);
}
else
{
Log(("sysret: %04x:%016RX64 [efl=%#llx] -> %04x:%08RX32 [r11=%#llx]\n",
pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
| (3 << X86DESCATTR_DPL_SHIFT);
}
/** @todo testcase: See what kind of flags we can make SYSRET restore and
* what it really ignores. RF and VM are hinted at being zero, by AMD. */
}
else
{
Log(("sysret: %04x:%08RX32 [efl=%#x] -> %04x:%08RX32\n", pCtx->cs, pCtx->eip, pCtx->eflags.u, uNewCs, pCtx->ecx));
pCtx->cs.Attr.u = X86DESCATTR_P | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_DT | X86_SEL_TYPE_ER_ACC
| (3 << X86DESCATTR_DPL_SHIFT);
}
/* The SS hidden bits remain unchanged, says AMD. To that I say "Yeah, right!". */
/** @todo Testcase: verify that SS.u1Long and SS.u1DefBig are left unchanged
* on sysret. */
return VINF_SUCCESS;
}
/**
* Common worker for 'mov SReg, r/m', 'pop SReg' and the lXs instructions that
* loads a segment register.
*
* @param iSegReg The segment register number (valid).
* @param uSel The new selector value.
*/
{
/*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
/*
* Real mode and V8086 mode are easy.
*/
{
#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
/** @todo Does the CPU actually load limits and attributes in the
* jumps... Affects unreal mode. */
#endif
return VINF_SUCCESS;
}
/*
* Protected mode.
*
* Check if it's a null segment selector value first, that's OK for DS, ES,
* FS and GS. If not null, then we have to load and parse the descriptor.
*/
if (!(uSel & X86_SEL_MASK_OFF_RPL))
{
if (iSegReg == X86_SREG_SS)
{
/* In 64-bit kernel mode, the stack can be 0 because of the way
interrupts are dispatched. AMD seems to have a slightly more
relaxed relationship to SS.RPL than Intel does. */
/** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? There is a testcase (bs-cpu-xcpt-1), but double check this! */
&& !IEM_IS_GUEST_CPU_AMD(pIemCpu)) )
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
}
if (iSegReg == X86_SREG_SS)
return VINF_SUCCESS;
}
/* Fetch the descriptor. */
VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel, X86_XCPT_GP); /** @todo Correct exception? */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Check GPs first. */
{
}
{
{
}
{
}
{
Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
}
}
else
{
{
}
!= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
{
#if 0 /* this is what intel says. */
{
Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
}
#else /* this is what makes more sense. */
{
Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
}
{
Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
}
#endif
}
}
/* Is it there? */
{
}
/* The base and limit. */
&& iSegReg < X86_SREG_FS)
u64Base = 0;
else
/*
* Ok, everything checked out fine. Now set the accessed bit before
* committing the result into the registers.
*/
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
/* commit */
/** @todo check if the hidden bits are loaded correctly for 64-bit
* mode. */
return VINF_SUCCESS;
}
/**
* Implements 'mov SReg, r/m'.
*
* @param iSegReg The segment register number (valid).
* @param uSel The new selector value.
*/
{
if (rcStrict == VINF_SUCCESS)
{
if (iSegReg == X86_SREG_SS)
{
}
}
return rcStrict;
}
/**
* Implements 'pop SReg'.
*
* @param iSegReg The segment register number (valid).
* @param enmEffOpSize The effective operand size (valid).
*/
{
/*
* Read the selector off the stack and join paths with mov ss, reg.
*/
switch (enmEffOpSize)
{
case IEMMODE_16BIT:
{
if (rcStrict == VINF_SUCCESS)
break;
}
case IEMMODE_32BIT:
{
if (rcStrict == VINF_SUCCESS)
break;
}
case IEMMODE_64BIT:
{
if (rcStrict == VINF_SUCCESS)
break;
}
}
/*
* Commit the stack on success.
*/
if (rcStrict == VINF_SUCCESS)
{
if (iSegReg == X86_SREG_SS)
}
return rcStrict;
}
/**
* Implements lgs, lfs, les, lds & lss.
*/
{
/*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
/*
* Use iemCImpl_LoadSReg to do the tricky segment register loading.
*/
/** @todo verify and test that mov, pop and lXs perform the segment
* register loading in the exact same way. */
if (rcStrict == VINF_SUCCESS)
{
switch (enmEffOpSize)
{
case IEMMODE_16BIT:
break;
case IEMMODE_32BIT:
break;
case IEMMODE_64BIT:
break;
}
}
return rcStrict;
}
/**
* Helper for VERR, VERW, LAR, and LSL that loads the descriptor into @a pDesc.
*
* @retval VINF_SUCCESS on success.
* @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
* @retval iemMemFetchSysU64 return value.
*
* @param pIemCpu The IEM state of the calling EMT.
* @param uSel The selector value.
* @param fAllowSysDesc Whether system descriptors are OK or not.
* @param pDesc Where to return the descriptor on success.
*/
static VBOXSTRICTRC iemCImpl_LoadDescHelper(PIEMCPU pIemCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
{
return VINF_IEM_SELECTOR_NOT_OK;
/* Within the table limits? */
if (uSel & X86_SEL_LDT)
{
return VINF_IEM_SELECTOR_NOT_OK;
}
else
{
return VINF_IEM_SELECTOR_NOT_OK;
}
/* Fetch the descriptor. */
VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
if (rcStrict != VINF_SUCCESS)
return rcStrict;
{
if (!fAllowSysDesc)
return VINF_IEM_SELECTOR_NOT_OK;
if (CPUMIsGuestInLongModeEx(pCtx))
{
rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
}
return VINF_SUCCESS;
}
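#if 0 /* Illustrative sketch of the "within the table limits?" test above: the 8-byte
       * descriptor addressed by the selector index must fit inside the GDT/LDT limit.
       * Hypothetical helper. */
static bool sketchSelWithinTableLimit(uint16_t uSel, uint32_t cbTableLimit)
{
    uint32_t const offDesc = uSel & UINT32_C(0xfff8);   /* strip the RPL and TI bits */
    return offDesc + 7 <= cbTableLimit;
}
#endif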
/**
* Implements verr (fWrite = false) and verw (fWrite = true).
*/
{
/** @todo figure out whether the accessed bit is set or not. */
bool fAccessible = true;
if (rcStrict == VINF_SUCCESS)
{
/* Check the descriptor, order doesn't matter much here. */
fAccessible = false;
else
{
if ( fWrite
fAccessible = false;
/** @todo testcase for the conforming behavior. */
!= (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
{
fAccessible = false;
fAccessible = false;
}
}
}
else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
fAccessible = false;
else
return rcStrict;
/* commit */
return VINF_SUCCESS;
}
/**
* Implements LAR and LSL with 64-bit operand size.
*
* @returns VINF_SUCCESS.
* @param pu64Dst Pointer to the destination register.
* @param uSel The selector to load details for.
* @param pEFlags Pointer to the eflags register.
* @param fIsLar true = LAR, false = LSL.
*/
IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar)
{
/** @todo figure out whether the accessed bit is set or not. */
bool fDescOk = true;
if (rcStrict == VINF_SUCCESS)
{
/*
* Check the descriptor type.
*/
{
{
fDescOk = false;
else
{
/** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
break;
break;
default:
fDescOk = false;
break;
}
}
else
{
{
case X86_SEL_TYPE_SYS_LDT:
break;
break;
default:
fDescOk = false;
break;
}
}
}
if (fDescOk)
{
/*
*/
/** @todo testcase for the conforming behavior. */
if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
{
fDescOk = false;
fDescOk = false;
}
}
if (fDescOk)
{
/*
* All fine, start committing the result.
*/
if (fIsLar)
else
}
}
else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
fDescOk = false;
else
return rcStrict;
/* commit flags value and advance rip. */
return VINF_SUCCESS;
}
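#if 0 /* Illustrative sketch of the LSL result: the raw 20-bit descriptor limit is
       * expanded to a byte granular limit when the G bit is set.  Hypothetical helper. */
static uint32_t sketchLslLimit(uint32_t uRawLimit20, bool fGranularity)
{
    return fGranularity ? (uRawLimit20 << 12) | UINT32_C(0xfff) : uRawLimit20;
}
#endif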
/**
* Implements LAR and LSL with 16-bit operand size.
*
* @returns VINF_SUCCESS.
* @param pu16Dst Pointer to the destination register.
* @param uSel The selector to load details for.
* @param pEFlags Pointer to the eflags register.
* @param fIsLar true = LAR, false = LSL.
*/
IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar)
{
return VINF_SUCCESS;
}
/**
* Implements lgdt.
*
* @param iEffSeg The segment of the new gdtr contents
* @param GCPtrEffSrc The address of the new gdtr contents.
* @param enmEffOpSize The effective operand size.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
/*
* Fetch the limit and base address.
*/
VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
if (rcStrict == VINF_SUCCESS)
{
else
{
}
if (rcStrict == VINF_SUCCESS)
}
return rcStrict;
}
/**
* Implements sgdt.
*
* @param iEffSeg The segment where to store the gdtr content.
* @param GCPtrEffDst The address where to store the gdtr content.
* @param enmEffOpSize The effective operand size.
*/
{
/*
* Join paths with sidt.
* Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
* you really must know.
*/
VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
if (rcStrict == VINF_SUCCESS)
return rcStrict;
}
/**
* Implements lidt.
*
* @param iEffSeg The segment of the new idtr contents
* @param GCPtrEffSrc The address of the new idtr contents.
* @param enmEffOpSize The effective operand size.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
/*
* Fetch the limit and base address.
*/
VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
if (rcStrict == VINF_SUCCESS)
{
else
{
}
}
return rcStrict;
}
/**
* Implements sidt.
*
* @param iEffSeg The segment where to store the idtr content.
* @param GCPtrEffDst The address where to store the idtr content.
* @param enmEffOpSize The effective operand size.
*/
{
/*
* Join paths with sgdt.
* Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
* you really must know.
*/
VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
if (rcStrict == VINF_SUCCESS)
return rcStrict;
}
/**
* Implements lldt.
*
* @param uNewLdt The new LDT selector value.
*/
{
/*
* Check preconditions.
*/
{
return iemRaiseUndefinedOpcode(pIemCpu);
}
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
if (uNewLdt & X86_SEL_LDT)
{
}
/*
* Now, loading a NULL selector is easy.
*/
if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
{
else
{
}
else if (IEM_IS_GUEST_CPU_AMD(pIemCpu))
{
/* AMD-V seems to leave the base and limit alone. */
}
else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pIemCpu))
{
/* VT-x (Intel 3960x) seems to be doing the following. */
}
return VINF_SUCCESS;
}
/*
* Read the descriptor.
*/
VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt, X86_XCPT_GP); /** @todo Correct exception? */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Check GPs first. */
{
}
{
}
if (!IEM_IS_LONG_MODE(pIemCpu))
else
{
{
}
if (!IEM_IS_CANONICAL(u64Base))
{
}
}
/* NP */
{
}
/*
* It checks out alright, update the registers.
*/
/** @todo check if the actual value is loaded or if the RPL is dropped */
else
return VINF_SUCCESS;
}
/**
* Implements ltr.
*
* @param uNewTr The new task register selector value.
*/
{
/*
* Check preconditions.
*/
{
return iemRaiseUndefinedOpcode(pIemCpu);
}
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
if (uNewTr & X86_SEL_LDT)
{
}
if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Read the descriptor.
*/
VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr, X86_XCPT_GP); /** @todo Correct exception? */
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Check GPs first. */
{
}
if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
    && (   Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
        || IEM_IS_LONG_MODE(pIemCpu)) )
{
Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
}
if (!IEM_IS_LONG_MODE(pIemCpu))
else
{
{
}
if (!IEM_IS_CANONICAL(u64Base))
{
}
}
/* NP */
{
}
/*
* Set it busy.
* Note! Intel says this should lock down the whole descriptor, but we'll
* restrict ourselves to 32-bit for now due to lack of inline
* assembly and such.
*/
void *pvDesc;
rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
{
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* It checks out alright, update the registers.
*/
/** @todo check if the actual value is loaded or if the RPL is dropped */
else
return VINF_SUCCESS;
}
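/*
 * Illustrative sketch (not used): the busy-bit update ltr performs on the
 * mapped GDT entry. The available 32/64-bit TSS type (1001b) becomes the busy
 * type (1011b) by setting bit 1 of the type field, which is bit 9 of the
 * descriptor's high dword. The helper name is hypothetical; the real code
 * goes through iemMemMap and a commit as shown above.
 */
#if 0
static void iemSketchMarkTssBusy(uint32_t *pau32Desc /* the 8-byte descriptor as two dwords */)
{
    pau32Desc[1] |= UINT32_C(1) << 9; /* X86_SEL_TYPE_SYS_386_TSS_AVAIL -> _BUSY */
}
#endif /* illustrative sketch, not used */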
/**
* Implements mov GReg,CRx.
*
* @param iGReg The general register to store the CRx value in.
* @param iCrReg The CRx register to read (valid).
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
/* read it */
switch (iCrReg)
{
case 8:
{
if (RT_SUCCESS(rc))
else
crX = 0;
break;
}
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
}
/* store it */
else
return VINF_SUCCESS;
}
/**
* Common worker for 'mov CRx,GReg' and 'lmsw' that performs the actual control register write.
*
* @param iCrReg The CRx register to write (valid).
* @param uNewCrX The new value.
*/
{
int rc;
/*
* Try to store it.
* Unfortunately, CPUM only does a tiny bit of the work.
*/
switch (iCrReg)
{
case 0:
{
/*
* Perform checks.
*/
/* Check for reserved bits. */
{
Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* Check for invalid combinations. */
if ( (uNewCrX & X86_CR0_PG)
&& !(uNewCrX & X86_CR0_PE) )
{
Log(("Trying to set CR0.PG without CR0.PE\n"));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
if ( !(uNewCrX & X86_CR0_CD)
&& (uNewCrX & X86_CR0_NW) )
{
Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* Long mode consistency checks. */
if ( (uNewCrX & X86_CR0_PG)
&& !(uOldCrX & X86_CR0_PG)
{
{
Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
{
Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
}
/** @todo check reserved PDPTR bits as AMD states. */
/*
* Change CR0.
*/
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
/*
* Change EFER.LMA if entering or leaving long mode.
*/
{
if (uNewCrX & X86_CR0_PG)
else
NewEFER &= ~MSR_K6_EFER_LMA;
else
}
/*
* Inform PGM.
*/
{
{
/* ignore informational status codes */
}
}
else
#ifdef IN_RC
/* Return to ring-3 for rescheduling if WP or AM changes. */
if ( rcStrict == VINF_SUCCESS
#endif
break;
}
/*
* CR2 can be changed without any restrictions.
*/
case 2:
break;
/*
* CR3 is relatively simple, although AMD and Intel have different
* accounts of how setting reserved bits is handled. We take Intel's
* word for the lower bits and AMD's for the high bits (63:52).
*/
/** @todo Testcase: Setting reserved bits in CR3, especially before
* enabling paging. */
case 3:
{
/* check / mask the value. */
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
else
{
Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
}
/** @todo If we're in PAE mode we should check the PDPTRs for
* invalid bits. */
/* Make the change. */
{
}
else
/* Inform PGM. */
{
{
/* ignore informational status codes */
}
}
break;
}
/*
* CR4 is a bit more tedious as there are bits which cannot be cleared
* under some circumstances and such.
*/
case 4:
{
/** @todo Shouldn't this look at the guest CPUID bits to determine
* valid bits? e.g. if guest CPUID doesn't allow X86_CR4_OSXMMEEXCPT, we
* should #GP(0). */
/* reserved bits */
//if (xxx)
// fValid |= X86_CR4_VMXE;
//if (xxx)
// fValid |= X86_CR4_OSXSAVE;
{
Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* long mode checks. */
if ( (uOldCrX & X86_CR4_PAE)
&& !(uNewCrX & X86_CR4_PAE)
&& CPUMIsGuestInLongModeEx(pCtx) )
{
Log(("Trying to set clear CR4.PAE while long mode is active\n"));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Change it.
*/
{
}
else
/*
* Notify SELM and PGM.
*/
{
/* SELM - VME may change things wrt the TSS shadowing. */
{
Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
#ifdef VBOX_WITH_RAW_MODE
#endif
}
/* PGM - flushing and mode. */
{
/* ignore informational status codes */
}
}
else
break;
}
/*
* CR8 maps to the APIC TPR.
*/
case 8:
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
break;
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
}
/*
* Advance the RIP on success.
*/
if (RT_SUCCESS(rcStrict))
{
if (rcStrict != VINF_SUCCESS)
}
return rcStrict;
}
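/*
 * Illustrative sketch (not used): the CR0 consistency rules enforced by the
 * case 0 block above, pulled out into a standalone predicate. The helper name
 * and the flattened inputs (fValidMask for the implemented CR0 bits, booleans
 * for CR4.PAE, EFER.LME and a long-mode CS) are hypothetical.
 */
#if 0
static bool iemSketchIsCr0LoadValid(uint64_t uNewCr0, uint64_t uOldCr0, uint64_t fValidMask,
                                    bool fCr4Pae, bool fEferLme, bool fCsLongMode)
{
    if (uNewCr0 & ~fValidMask)                              /* reserved bits */
        return false;
    if ((uNewCr0 & X86_CR0_PG) && !(uNewCr0 & X86_CR0_PE))  /* PG requires PE */
        return false;
    if ((uNewCr0 & X86_CR0_NW) && !(uNewCr0 & X86_CR0_CD))  /* NW requires CD */
        return false;
    /* Enabling paging with EFER.LME set (i.e. entering long mode) requires
       CR4.PAE and must not happen with a long-mode CS already loaded. */
    if ((uNewCr0 & X86_CR0_PG) && !(uOldCr0 & X86_CR0_PG) && fEferLme)
    {
        if (!fCr4Pae || fCsLongMode)
            return false;
    }
    return true;
}
#endif /* illustrative sketch, not used */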
/**
* Implements mov CRx,GReg.
*
* @param iCrReg The CRx register to write (valid).
* @param iGReg The general register to load the CRx value from.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
/*
* Read the new value from the source register and call common worker.
*/
else
}
/**
* Implements 'LMSW r/m16'.
*
* @param u16NewMsw The new value.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
/*
* Compose the new CR0 value and call common worker.
*/
}
/**
* Implements 'CLTS'.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
uNewCr0 &= ~X86_CR0_TS;
}
/**
* Implements mov GReg,DRx.
*
* @param iGReg The general register to store the DRx value in.
* @param iDrReg The DRx register to read (0-7).
*/
{
/*
* Check preconditions.
*/
/* Raise GPs. */
return iemRaiseGeneralProtectionFault0(pIemCpu);
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* Raise #DB if general access detect is enabled. */
{
return iemRaiseDebugException(pIemCpu);
}
/*
* Read the debug register and store it in the specified general register.
*/
switch (iDrReg)
{
case 6:
case 4:
drX |= X86_DR6_RA1_MASK;
drX &= ~X86_DR6_RAZ_MASK;
break;
case 7:
case 5:
drX &= ~X86_DR7_RAZ_MASK;
break;
IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
}
else
return VINF_SUCCESS;
}
/**
* Implements mov DRx,GReg.
*
* @param iDrReg The DRx register to write (valid).
* @param iGReg The general register to load the DRx value from.
*/
{
/*
* Check preconditions.
*/
return iemRaiseGeneralProtectionFault0(pIemCpu);
{
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
iDrReg += 2;
}
/* Raise #DB if general access detect is enabled. */
/** @todo Is \#DB for DR7.GD raised before the MBZ-bit checks below raise \#GP? */
{
return iemRaiseDebugException(pIemCpu);
}
/*
* Read the new value from the source register.
*/
else
/*
* Adjust it.
*/
switch (iDrReg)
{
case 0:
case 1:
case 2:
case 3:
/* nothing to adjust */
break;
case 6:
if (uNewDrX & X86_DR6_MBZ_MASK)
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
uNewDrX &= ~X86_DR6_RAZ_MASK;
break;
case 7:
if (uNewDrX & X86_DR7_MBZ_MASK)
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
uNewDrX &= ~X86_DR7_RAZ_MASK;
break;
}
/*
* Do the actual setting.
*/
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
{
}
else
return VINF_SUCCESS;
}
/**
* Implements 'INVLPG m'.
*
* @param GCPtrPage The effective address of the page to invalidate.
* @remarks Updates the RIP.
*/
{
/* ring-0 only. */
return iemRaiseGeneralProtectionFault0(pIemCpu);
if (rc == VINF_SUCCESS)
return VINF_SUCCESS;
if (rc == VINF_PGM_SYNC_CR3)
return rc;
}
/**
* Implements RDTSC.
*/
{
/*
* Check preconditions.
*/
return iemRaiseUndefinedOpcode(pIemCpu);
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Do the job.
*/
#ifdef IEM_VERIFICATION_MODE_FULL
pIemCpu->fIgnoreRaxRdx = true;
#endif
return VINF_SUCCESS;
}
/**
* Implements RDMSR.
*/
{
/*
* Check preconditions.
*/
return iemRaiseUndefinedOpcode(pIemCpu);
return iemRaiseGeneralProtectionFault0(pIemCpu);
/*
* Do the job.
*/
if (rc != VINF_SUCCESS)
{
#ifdef IN_RING3
if (s_cTimes++ < 10)
#endif
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
return VINF_SUCCESS;
}
/**
* Implements WRMSR.
*/
{
/*
* Check preconditions.
*/
return iemRaiseUndefinedOpcode(pIemCpu);
return iemRaiseGeneralProtectionFault0(pIemCpu);
/*
* Do the job.
*/
int rc;
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
{
}
if (rc != VINF_SUCCESS)
{
#ifdef IN_RING3
if (s_cTimes++ < 10)
#endif
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
return VINF_SUCCESS;
}
/**
* Implements 'IN eAX, port'.
*
* @param u16Port The source port.
* @param cbReg The register size.
*/
{
/*
* CPL check
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* Perform the I/O.
*/
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, &u32Value, cbReg);
else
if (IOM_SUCCESS(rcStrict))
{
switch (cbReg)
{
default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
}
if (rcStrict != VINF_SUCCESS)
/*
* Check for I/O breakpoints.
*/
{
if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
}
}
return rcStrict;
}
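/*
 * Illustrative sketch (not used): how the value returned by the port read is
 * merged into rAX for the three register sizes handled by the switch above.
 * The helper name is hypothetical; writing the 32-bit form zero-extends into
 * RAX, following the usual 32-bit destination rule.
 */
#if 0
static uint64_t iemSketchMergeInResult(uint64_t uOldRax, uint32_t u32Value, uint8_t cbReg)
{
    switch (cbReg)
    {
        case 1: return (uOldRax & ~(uint64_t)0xff)   | (uint8_t)u32Value;  /* AL  */
        case 2: return (uOldRax & ~(uint64_t)0xffff) | (uint16_t)u32Value; /* AX  */
        case 4: return u32Value;                                           /* EAX, zero-extending RAX */
        default: return uOldRax;
    }
}
#endif /* illustrative sketch, not used */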
/**
* Implements 'IN eAX, DX'.
*
* @param cbReg The register size.
*/
{
}
/**
* Implements 'OUT port, eAX'.
*
* @param u16Port The destination port.
* @param cbReg The register size.
*/
{
/*
* CPL check
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* Perform the I/O.
*/
switch (cbReg)
{
default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
}
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, u32Value, cbReg);
else
if (IOM_SUCCESS(rcStrict))
{
if (rcStrict != VINF_SUCCESS)
/*
* Check for I/O breakpoints.
*/
{
if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
}
}
return rcStrict;
}
/**
* Implements 'OUT DX, eAX'.
*
* @param cbReg The register size.
*/
{
}
/**
* Implements 'CLI'.
*/
{
{
if (!(fEfl & X86_EFL_VM))
{
fEfl &= ~X86_EFL_IF;
fEfl &= ~X86_EFL_VIF;
else
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* V8086 */
else if (uIopl == 3)
fEfl &= ~X86_EFL_IF;
else if ( uIopl < 3
fEfl &= ~X86_EFL_VIF;
else
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* real mode */
else
fEfl &= ~X86_EFL_IF;
/* Commit. */
return VINF_SUCCESS;
}
/**
* Implements 'STI'.
*/
{
{
if (!(fEfl & X86_EFL_VM))
{
fEfl |= X86_EFL_IF;
&& !(fEfl & X86_EFL_VIP) )
fEfl |= X86_EFL_VIF;
else
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* V8086 */
else if (uIopl == 3)
fEfl |= X86_EFL_IF;
else if ( uIopl < 3
&& !(fEfl & X86_EFL_VIP) )
fEfl |= X86_EFL_VIF;
else
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/* real mode */
else
fEfl |= X86_EFL_IF;
/* Commit. */
return VINF_SUCCESS;
}
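/*
 * Illustrative sketch (not used): the privilege decision shared by the CLI
 * and STI implementations above. The helper name is hypothetical and the STI
 * specifics (VIP causing #GP, the interrupt shadow) are left out.
 * Returns 0 to operate on EFLAGS.IF, 1 to operate on EFLAGS.VIF, -1 for #GP(0).
 */
#if 0
static int iemSketchCliStiTarget(bool fProtectedMode, bool fV86Mode, uint8_t uIopl,
                                 uint8_t uCpl, bool fCr4Pvi, bool fCr4Vme)
{
    if (!fProtectedMode)
        return 0;               /* real mode: always IF */
    if (!fV86Mode)
    {
        if (uIopl >= uCpl)
            return 0;           /* privileged enough: IF */
        if (uCpl == 3 && fCr4Pvi)
            return 1;           /* protected-mode virtual interrupts: VIF */
        return -1;              /* #GP(0) */
    }
    if (uIopl == 3)
        return 0;               /* V8086 with IOPL 3: IF */
    if (fCr4Vme)
        return 1;               /* V8086 mode extensions: VIF */
    return -1;                  /* #GP(0) */
}
#endif /* illustrative sketch, not used */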
/**
* Implements 'HLT'.
*/
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
return VINF_EM_HALT;
}
/**
* Implements 'MONITOR'.
*/
{
/*
* Permission checks.
*/
{
Log2(("monitor: CPL != 0\n"));
}
{
Log2(("monitor: Not in CPUID\n"));
return iemRaiseUndefinedOpcode(pIemCpu);
}
/*
* Gather the operands and validate them.
*/
/** @todo Test whether EAX or ECX is processed first, i.e. do we get \#PF or
* \#GP first. */
if (uEcx != 0)
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
VBOXSTRICTRC rcStrict = iemMemApplySegment(pIemCpu, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, iEffSeg, 1, &GCPtrMem);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, GCPtrMem, IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA, &GCPhysMem);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* Call EM to prepare the monitor/wait.
*/
rcStrict = EMMonitorWaitPrepare(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rax, pCtx->rcx, pCtx->rdx, GCPhysMem);
return rcStrict;
}
/**
* Implements 'MWAIT'.
*/
{
/*
* Permission checks.
*/
{
Log2(("mwait: CPL != 0\n"));
/** @todo MSR[0xC0010015].MonMwaitUserEn if we care. (Remember to check
* EFLAGS.VM then.) */
return iemRaiseUndefinedOpcode(pIemCpu);
}
{
Log2(("mwait: Not in CPUID\n"));
return iemRaiseUndefinedOpcode(pIemCpu);
}
/*
* Gather the operands and validate them.
*/
if (uEcx != 0)
{
/* Only supported extension is break on IRQ when IF=0. */
if (uEcx > 1)
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
uint32_t fMWaitFeatures = 0;
{
Log2(("mwait eax=%RX32, ecx=%RX32; break-on-IRQ-IF=0 extension not enabled -> #GP(0)\n", uEax, uEcx));
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
}
/*
*/
return rcStrict;
}
/**
* Implements 'SWAPGS'.
*/
{
/*
* Permission checks.
*/
{
Log2(("swapgs: CPL != 0\n"));
return iemRaiseUndefinedOpcode(pIemCpu);
}
/*
* Do the job.
*/
return VINF_SUCCESS;
}
/**
* Implements 'CPUID'.
*/
{
CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
return VINF_SUCCESS;
}
/**
* Implements 'AAD'.
*
* @param bImm The immediate operand.
*/
{
return VINF_SUCCESS;
}
/**
* Implements 'AAM'.
*
* @param bImm The immediate operand. Cannot be 0.
*/
{
return VINF_SUCCESS;
}
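/*
 * Illustrative sketch (not used): the AAD and AAM arithmetic on AX. The
 * helper names are hypothetical and the flag updates (SF/ZF/PF from the new
 * AL) are omitted; the real AAM raises #DE when the immediate is zero.
 */
#if 0
static uint16_t iemSketchAad(uint16_t uAx, uint8_t bImm)
{
    /* AL = (AL + AH * imm) & 0xff; AH = 0. */
    return (uint8_t)((uAx & 0xff) + (uint8_t)(uAx >> 8) * bImm);
}

static uint16_t iemSketchAam(uint16_t uAx, uint8_t bImm)
{
    /* AH = AL / imm; AL = AL % imm; bImm == 0 would be #DE. */
    uint8_t const uAl = (uint8_t)(uAx & 0xff);
    return (uint16_t)(((uAl / bImm) << 8) | (uAl % bImm));
}
#endif /* illustrative sketch, not used */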
/**
* Implements 'DAA'.
*/
{
{
}
else
{
}
else
return VINF_SUCCESS;
}
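/*
 * Illustrative sketch (not used): the architectural DAA algorithm, with AL,
 * CF and AF passed explicitly instead of via the context; SF/ZF/PF updates
 * are omitted. The helper name is hypothetical. DAS below is the analogous
 * subtractive variant.
 */
#if 0
static uint8_t iemSketchDaa(uint8_t uAl, bool *pfCarry, bool *pfAuxCarry)
{
    uint8_t const uOldAl = uAl;
    bool const    fOldCf = *pfCarry;
    if ((uAl & 0xf) > 9 || *pfAuxCarry)
    {
        uAl        += 6;        /* wraps modulo 256, like the hardware */
        *pfAuxCarry = true;
    }
    else
        *pfAuxCarry = false;
    if (uOldAl > 0x99 || fOldCf)
    {
        uAl     += 0x60;
        *pfCarry = true;
    }
    else
        *pfCarry = false;
    return uAl;
}
#endif /* illustrative sketch, not used */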
/**
* Implements 'DAS'.
*/
{
{
if (uInputAL < 6)
}
else
{
}
{
}
return VINF_SUCCESS;
}
/*
* Instantiate the various string operation combinations.
*/
#define OP_SIZE 8
#define ADDR_SIZE 16
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 8
#define ADDR_SIZE 32
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 8
#define ADDR_SIZE 64
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 16
#define ADDR_SIZE 16
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 16
#define ADDR_SIZE 32
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 16
#define ADDR_SIZE 64
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 32
#define ADDR_SIZE 16
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 32
#define ADDR_SIZE 32
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 32
#define ADDR_SIZE 64
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 64
#define ADDR_SIZE 32
#include "IEMAllCImplStrInstr.cpp.h"
#define OP_SIZE 64
#define ADDR_SIZE 64
#include "IEMAllCImplStrInstr.cpp.h"
/**
* Implements 'FINIT' and 'FNINIT'.
*
* @param fCheckXcpts Whether to check for unmasked pending exceptions or
* not.
*/
{
return iemRaiseDeviceNotAvailable(pIemCpu);
/** @todo check for unmasked pending exceptions:
if (fCheckXcpts && TODO )
    return iemRaiseMathFault(pIemCpu);
*/
if (iemFRegIsFxSaveFormat(pIemCpu))
{
}
else
{
}
return VINF_SUCCESS;
}
/**
* Implements 'FXSAVE'.
*
* @param iEffSeg The effective segment.
* @param GCPtrEff The address of the image.
* @param enmEffOpSize The operand size (only REX.W really matters).
*/
{
/*
* Raise exceptions.
*/
return iemRaiseUndefinedOpcode(pIemCpu);
return iemRaiseDeviceNotAvailable(pIemCpu);
if (GCPtrEff & 15)
{
return iemRaiseAlignmentCheckException(pIemCpu);
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Access the memory.
*/
void *pvMem512;
VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* Store the registers.
*/
/* Note: when CR4.OSFXSR is clear it is implementation specific whether MXCSR and XMM0-XMM7 are saved. */
/* common for all formats */
{
/** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
* them for now... */
}
/* FPU IP, CS, DP and DS. */
if (enmEffOpSize == IEMMODE_64BIT)
{
/* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
pDst->au32RsrvdForSoftware[0] = 0;
}
else
{
}
/* XMM registers. */
{
/** @todo Testcase: What happens to the reserved XMM registers? Untouched,
* right? */
}
/*
* Commit the memory.
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
/**
* Implements 'FXRSTOR'.
*
* @param GCPtrEff The address of the image.
* @param enmEffOpSize The operand size (only REX.W really matters).
*/
{
/*
* Raise exceptions.
*/
return iemRaiseUndefinedOpcode(pIemCpu);
return iemRaiseDeviceNotAvailable(pIemCpu);
if (GCPtrEff & 15)
{
return iemRaiseAlignmentCheckException(pIemCpu);
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Access the memory.
*/
void *pvMem512;
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* Check the state for stuff which will #GP(0).
*/
if (fMXCSR & ~fMXCSR_MASK)
{
return iemRaiseGeneralProtectionFault0(pIemCpu);
}
/*
* Load the registers.
*/
/* Note: when CR4.OSFXSR is clear it is implementation specific whether MXCSR and XMM0-XMM7 are restored. */
/* common for all formats */
/* (MXCSR_MASK is read-only) */
{
}
/* FPU IP, CS, DP and DS. */
{
}
else
{
}
/* XMM registers. */
{
}
/*
* Commit the memory.
*/
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
/**
* Common routine for fnstenv and fnsave.
*
* @param uPtr Where to store the state.
* @param pCtx The CPU context.
*/
static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
{
if (enmEffOpSize == IEMMODE_16BIT)
{
{
/** @todo Testcase: What is stored for FPUIP and FPUDP when they were recorded in
* protected mode or long mode and we save it in real mode? And vice
* versa? And with 32-bit operand size? I think CPU is storing the
* effective address ((CS << 4) + IP) in the offset register and not
* doing any address calculations here. */
}
else
{
}
}
else
{
/** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
{
}
else
{
}
}
}
/**
* Common routine for fldenv and frstor.
*
* @param uPtr Where to load the state from.
* @param pCtx The CPU context.
*/
static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
{
if (enmEffOpSize == IEMMODE_16BIT)
{
{
}
else
{
/** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
}
}
else
{
{
}
else
{
}
}
/* Make adjustments. */
/** @todo Testcase: Is FSW.ES/B re-evaluated according to which unmasked exceptions are pending after loading the saved state? */
}
/**
* Implements 'FNSTENV'.
*
* @param enmEffOpSize The operand size (only REX.W really matters).
* @param iEffSeg The effective segment register for @a GCPtrEff.
* @param GCPtrEffDst The address of the image.
*/
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
return VINF_SUCCESS;
}
/**
* Implements 'FNSAVE'.
*
* @param GCPtrEffDst The address of the image.
* @param enmEffOpSize The operand size.
*/
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
{
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
* Re-initialize the FPU.
*/
return VINF_SUCCESS;
}
/**
* Implements 'FLDENV'.
*
* @param enmEffOpSize The operand size (only REX.W really matters).
* @param iEffSeg The effective segment register for @a GCPtrEff.
* @param GCPtrEffSrc The address of the image.
*/
{
VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
/**
* Implements 'FRSTOR'.
*
* @param GCPtrEffSrc The address of the image.
* @param enmEffOpSize The operand size.
*/
{
VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
if (rcStrict != VINF_SUCCESS)
return rcStrict;
{
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
return VINF_SUCCESS;
}
/**
* Implements 'FLDCW'.
*
* @param u16Fcw The new FCW.
*/
{
/** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
/** @todo Testcase: Try to see what happens when trying to set undefined bits
* (other than 6 and 7). Currently ignoring them. */
/** @todo Testcase: Test that it raises and lowers the FPU exception bits
* according to FSW. (This is what is currently implemented.) */
/* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
return VINF_SUCCESS;
}
/**
* Implements the underflow case of fxch.
*
* @param iStReg The other stack register.
*/
{
/** @todo Testcase: fxch underflow. Making assumptions that underflowed
* registers are read as QNaN and then exchanged. This could be
* wrong... */
{
{
else
}
else
{
}
}
else
{
/* raise underflow exception, don't change anything. */
}
return VINF_SUCCESS;
}
/**
* Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
*
* @param iStReg The other stack register to compare ST(0) with.
* @param pfnAImpl The assembly comparison worker.
* @param fPop Whether to pop the stack after the comparison.
*/
{
/*
* Raise exceptions.
*/
return iemRaiseDeviceNotAvailable(pIemCpu);
if (u16Fsw & X86_FSW_ES)
return iemRaiseMathFault(pIemCpu);
/*
* Check if any of the register accesses causes #SF + #IA.
*/
{
uint32_t u32Eflags = pfnAImpl(&pCtx->fpu, &u16Fsw, &pCtx->fpu.aRegs[0].r80, &pCtx->fpu.aRegs[iStReg].r80);
if ( !(u16Fsw & X86_FSW_IE)
{
}
}
{
/* Masked underflow. */
}
else
{
/* Raise underflow - don't touch EFLAGS or TOP. */
fPop = false;
}
/*
* Pop if necessary.
*/
if (fPop)
{
}
return VINF_SUCCESS;
}
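/*
 * Illustrative sketch (not used): the EFLAGS result FCOMI/FUCOMI produce for
 * the four comparison outcomes; OF, SF and AF are cleared by the instruction.
 * The helper name and the (relation, unordered) inputs are hypothetical.
 */
#if 0
static uint32_t iemSketchFcomiEflags(int iRelation /* <0, 0 or >0 */, bool fUnordered)
{
    if (fUnordered)
        return X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
    if (iRelation < 0)      /* ST(0) < source */
        return X86_EFL_CF;
    if (iRelation == 0)     /* ST(0) == source */
        return X86_EFL_ZF;
    return 0;               /* ST(0) > source */
}
#endif /* illustrative sketch, not used */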
/** @} */