/* IEMAllCImpl.cpp.h revision ca82e9e77743b3e6caae138a83a4c2ca942294e4 */
 * IEM - Instruction Implementation in C/C++ (code include).
 * Copyright (C) 2011 Oracle Corporation
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
/** @name Misc Helpers
 * Checks if we are allowed to access the given I/O port, raising the
 * appropriate exceptions if we aren't (or if the I/O bitmap is not
 * accessible).
 * @returns Strict VBox status code.
 * @param pIemCpu The IEM per CPU data.
 * @param pCtx The register context.
 * @param u16Port The port number.
 * @param cbOperand The operand size.
DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
    NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
    AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
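/*
 * Illustrative sketch only (not the missing VBox implementation): what the
 * I/O permission bitmap check referenced by the todo above does
 * architecturally.  pbTss/cbTss stand in for access to the guest TSS; all
 * names in this sketch are hypothetical.
 */
static bool iemSketchIoBitmapDeniesAccess(uint8_t const *pbTss, uint32_t cbTss,
                                          uint16_t u16Port, uint8_t cbOperand)
{
    /* The 16-bit I/O map base lives at offset 0x66 (102) of the 32-bit TSS. */
    if (cbTss < 0x68)
        return true;
    uint16_t offIoBitmap = (uint16_t)(pbTss[0x66] | ((uint16_t)pbTss[0x67] << 8));

    /* One bit per port; every bit covered by the access must be clear and
       must lie within the TSS limit, otherwise the access raises #GP. */
    for (unsigned i = 0; i < cbOperand; i++)
    {
        uint32_t const uPort   = (uint32_t)u16Port + i;
        uint32_t const offByte = offIoBitmap + (uPort >> 3);
        if (offByte >= cbTss)
            return true;                            /* outside the TSS */
        if (pbTss[offByte] & (1 << (uPort & 7)))
            return true;                            /* permission bit set */
    }
    return false;
}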
 * Calculates the parity bit.
 * @returns true if the bit is set, false if not.
 * @param u8Result The least significant byte of the result.
 * Parity is set if the number of bits in the least significant byte of
 * the result is even.
#endif /* not used */
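/*
 * Minimal sketch of the parity rule stated above: PF is set when the low
 * byte of the result contains an even number of set bits.
 */
static bool iemSketchCalcParity(uint8_t u8Result)
{
    unsigned cSetBits = 0;
    for (unsigned iBit = 0; iBit < 8; iBit++)
        cSetBits += (u8Result >> iBit) & 1;
    return (cSetBits & 1) == 0; /* even number of set bits -> PF=1 */
}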
 * Updates the specified flags according to an 8-bit result.
 * @param pIemCpu The IEM per CPU data.
 * @param u8Result The result to set the flags according to.
 * @param fToUpdate The flags to update.
 * @param fUndefined The flags that are specified as undefined.
static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
    pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
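/*
 * Sketch of the merge the fragment above performs, in plain C (the real code
 * has an arithmetic helper compute fEFlags for the result first): recompute
 * the result-dependent flags for the byte, then replace only the bits named
 * by fToUpdate/fUndefined and leave the rest of EFLAGS untouched.
 */
static uint32_t iemSketchMergeArithEFlagsU8(uint32_t fEFlagsOld, uint8_t u8Result,
                                            uint32_t fToUpdate, uint32_t fUndefined)
{
    unsigned cSetBits = 0;
    for (unsigned iBit = 0; iBit < 8; iBit++)
        cSetBits += (u8Result >> iBit) & 1;

    uint32_t fEFlags = 0;
    if (!u8Result)
        fEFlags |= X86_EFL_ZF;
    if (u8Result & 0x80)
        fEFlags |= X86_EFL_SF;
    if (!(cSetBits & 1))
        fEFlags |= X86_EFL_PF;

    /* Same pattern as the fragment above: clear, then merge the chosen bits. */
    fEFlagsOld &= ~(fToUpdate | fUndefined);
    fEFlagsOld |= (fToUpdate | fUndefined) & fEFlags;
    return fEFlagsOld;
}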
 * Loads a NULL data selector into a selector register, both the hidden and
 * visible parts, in protected mode.
 * @param puSel The selector register.
 * @param pHid The hidden register part.
static void iemHlpLoadNullDataSelectorProt(PRTSEL puSel, PCPUMSELREGHID pHid)
    /** @todo write a testcase checking what happens when loading a NULL data
     *        selector in protected mode. */
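/*
 * Hedged sketch of what the helper above is expected to do (zero the visible
 * selector and the hidden base/limit/attributes); the exact hardware
 * behaviour is what the testcase todo above is about.
 */
static void iemSketchLoadNullDataSelectorProt(PRTSEL puSel, PCPUMSELREGHID pHid)
{
    *puSel         = 0; /* visible part */
    pHid->u64Base  = 0; /* hidden part: base, limit and attributes cleared */
    pHid->u32Limit = 0;
    pHid->Attr.u   = 0;
}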
 * Helper used by iret.
 * @param uCpl The new CPL.
 * @param puSel The selector register.
 * @param pHid The corresponding hidden register.
static void iemHlpAdjustSelectorForNewCpl(uint8_t uCpl, PRTSEL puSel, PCPUMSELREGHID pHid)
        && pHid->Attr.n.u1DescType /* code or data, not system */
        &&    (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
           != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
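/*
 * Sketch of the complete check the condition fragments above belong to: when
 * returning to a less privileged level, a data or non-conforming code
 * selector whose DPL is below the new CPL is replaced by the NULL selector.
 */
static void iemSketchAdjustSelectorForNewCpl(uint8_t uCpl, PRTSEL puSel, PCPUMSELREGHID pHid)
{
    if (   uCpl > pHid->Attr.n.u2Dpl
        && pHid->Attr.n.u1DescType /* code or data, not system */
        &&    (pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
           != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
        iemHlpLoadNullDataSelectorProt(puSel, pHid);
}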
/** @name C Implementations
 * Implements a 16-bit popa.
 * The docs are a bit hard to comprehend here, but it looks like we wrap
 * around in real mode as long as none of the individual "popa" crosses the
 * end of the stack segment.  In protected mode we check the whole access
 * in one go.  For efficiency, only do the word-by-word thing if we're in
 * danger of wrapping around.
    /** @todo do popa boundary / wrap-around checks. */
                    && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
        /* word-by-word */
        rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
        rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
        rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
        rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
        rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
        rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
        rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
        rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
        /* skip sp */
        rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
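/*
 * Minimal sketch of the fast path taken when no wrap-around is possible: the
 * whole 16-byte frame is mapped once (see iemMemMap above) and the registers
 * are read in POPA order, DI at the lowest address and the saved SP word
 * skipped.  The index values are illustrative; pa16Mem points at the mapped
 * frame.
 */
static void iemSketchPopa16FromFrame(PCPUMCTX pCtx, uint16_t const *pa16Mem)
{
    pCtx->di = pa16Mem[0];
    pCtx->si = pa16Mem[1];
    pCtx->bp = pa16Mem[2];
    /* pa16Mem[3] holds the SP value stored by PUSHA - it is ignored. */
    pCtx->bx = pa16Mem[4];
    pCtx->dx = pa16Mem[5];
    pCtx->cx = pa16Mem[6];
    pCtx->ax = pa16Mem[7];
}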
 * Implements a 32-bit popa.
 * The docs are a bit hard to comprehend here, but it looks like we wrap
 * around in real mode as long as none of the individual "popa" crosses the
 * end of the stack segment.  In protected mode we check the whole access
 * in one go.  For efficiency, only do the word-by-word thing if we're in
 * danger of wrapping around.
    /** @todo do popa boundary / wrap-around checks. */
                    && (pCtx->csHid.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
        /* word-by-word */
        rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
        rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
        rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
        rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
        rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
        rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
        rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
        rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
        /* skip esp */
        rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
 * Implements a 16-bit pusha.
 * The docs are a bit hard to comprehend here, but it looks like we wrap
 * around in real mode as long as none of the individual "pushd" crosses the
 * end of the stack segment.  In protected mode we check the whole access
 * in one go.  For efficiency, only do the word-by-word thing if we're in
 * danger of wrapping around.
    /** @todo do pusha boundary / wrap-around checks. */
        /* word-by-word */
        rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
        rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
        rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
        rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
        rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
        rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
        rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
        rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
        rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
        rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
 * Implements a 32-bit pusha.
 * The docs are a bit hard to comprehend here, but it looks like we wrap
 * around in real mode as long as none of the individual "pusha" crosses the
 * end of the stack segment.  In protected mode we check the whole access
 * in one go.  For efficiency, only do the word-by-word thing if we're in
 * danger of wrapping around.
    /** @todo do pusha boundary / wrap-around checks. */
        /* word-by-word */
        rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
        rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
        rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
        rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
        rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
        rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
        rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
        rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
        rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
        rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
 * Implements pushf.
 * @param enmEffOpSize The effective operand size.
IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
     * If we're in V8086 mode some care is required (which is why we're
     * doing this in a C implementation).
        fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
        return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
     * Ok, clear RF and VM and push the flags.
    rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
 * Implements popf.
 * @param enmEffOpSize The effective operand size.
IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
     * V8086 is special as usual.
         * Almost anything goes if IOPL is 3.
            fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
            fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
         * Interrupt flag virtualization with CR4.VME=1.
            rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
            /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
             *        or before? */
            fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
            fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
            fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
     * Not in V8086 mode.
    /* Pop the flags. */
        fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
    /* Merge them with the current flags. */
    if (   (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
    else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
        fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
        fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
        fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
     * Commit the flags.
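/*
 * Simplified sketch of the privilege-dependent merge performed above for the
 * non-V8086 case (it omits the "nothing relevant changed" shortcut): CPL 0
 * may change every POPF-modifiable bit, CPL <= IOPL everything but IOPL
 * itself, and anything less privileged additionally keeps IF.
 */
static uint32_t iemSketchPopfMerge(uint32_t fEflOld, uint32_t fEflPopped, uint8_t uCpl)
{
    uint32_t fEflNew;
    if (uCpl == 0)
    {
        fEflNew  = fEflPopped &  X86_EFL_POPF_BITS;
        fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
    }
    else if (uCpl <= X86_EFL_GET_IOPL(fEflOld))
    {
        fEflNew  = fEflPopped &   (X86_EFL_POPF_BITS & ~(X86_EFL_IOPL));
        fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
    }
    else
    {
        fEflNew  = fEflPopped &   (X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF));
        fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
    }
    return fEflNew;
}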
 * Implements an indirect call.
 * @param uNewPC The new program counter (RIP) value (loaded from the
 *               operand).
 * @param enmEffOpSize The effective operand size.
    VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
 * Implements a 16-bit relative call.
 * @param offDisp The displacement offset.
IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
    VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
 * Implements a 32-bit indirect call.
 * @param uNewPC The new program counter (RIP) value (loaded from the
 *               operand).
 * @param enmEffOpSize The effective operand size.
    VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
 * Implements a 32-bit relative call.
 * @param offDisp The displacement offset.
IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
    VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
 * Implements a 64-bit indirect call.
 * @param uNewPC The new program counter (RIP) value (loaded from the
 *               operand).
 * @param enmEffOpSize The effective operand size.
    VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
 * Implements a 64-bit relative call.
 * @param offDisp The displacement offset.
IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
    VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
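/*
 * Sketch of the common pattern the near call variants above follow, shown
 * for the 16-bit relative form: compute the return and target addresses,
 * validate the target against CS.limit, push the return address and only
 * then update RIP.  The exception routine used for the out-of-bounds case is
 * borrowed from the retf/retn code in this file and may differ from what the
 * real implementation raises; wrap-around and RSP rollback details are left
 * out.
 */
static VBOXSTRICTRC iemSketchCallRel16(PIEMCPU pIemCpu, PCPUMCTX pCtx, int16_t offDisp, uint8_t cbInstr)
{
    uint16_t const uOldPC = (uint16_t)pCtx->rip + cbInstr;   /* return address */
    uint16_t const uNewPC = uOldPC + offDisp;                /* branch target  */
    if (uNewPC > pCtx->csHid.u32Limit)
        return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);

    VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    pCtx->rip = uNewPC;
    return VINF_SUCCESS;
}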
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync * Implements far jumps.
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync * @param uSel The selector.
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync * @param offSeg The segment offset.
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync * @param enmEffOpSize The effective operand size.
e7e589ca404045e288030a4151e57b63976cb39dvboxsyncIEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint32_t, offSeg, IEMMODE, enmEffOpSize)
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync * Real mode and V8086 mode are easy. The only snag seems to be that
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync * CS.limit doesn't change and the limit check is done against the current
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync /** @todo REM reset the accessed bit (see on jmp far16 after disabling
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync * PE. Check with VT-x and AMD-V. */
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync * Protected mode. Need to parse the specified descriptor...
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync Log(("jmpf %04x:%08x -> invalid selector, #GP(0)\n", uSel, offSeg));
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync /* Fetch the descriptor. */
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync /* Is it there? */
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync Log(("jmpf %04x:%08x -> segment not present\n", uSel, offSeg));
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync * Deal with it according to its type.
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync /* Only code segments. */
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync Log(("jmpf %04x:%08x -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync /* L vs D. */
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync Log(("jmpf %04x:%08x -> both L and D are set.\n", uSel, offSeg));
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync /* DPL/RPL/CPL check, where conforming segments makes a difference. */
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync Log(("jmpf %04x:%08x -> DPL violation (conforming); DPL=%d CPL=%u\n",
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync Log(("jmpf %04x:%08x -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync Log(("jmpf %04x:%08x -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync /* Limit check. (Should alternatively check for non-canonical addresses
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync here, but that is ruled out by offSeg being 32-bit, right?) */
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync Log(("jmpf %04x:%08x -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync * Ok, everything checked out fine. Now set the accessed bit before
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync * committing the result into CS, CSHID and RIP.
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync /* commit */
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync pCtx->cs |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync pCtx->csHid.Attr.u = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff);
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync /** @todo check if the hidden bits are loaded correctly for 64-bit
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync * System selector.
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync /* Call various functions to do the work. */
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync Log(("jmpf %04x:%08x -> wrong sys selector (64-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync /* Call various functions to do the work. */
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync /* Call various functions to do the work. */
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync Log(("jmpf %04x:%08x -> wrong sys selector (32-bit): %d\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
9c9df2b728333cb734a7cc7856568e9ea9dc4600vboxsync return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
 * Implements far calls.
 * @param uSel The selector.
 * @param offSeg The segment offset.
 * @param enmOpSize The operand size (in case we need it).
IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmOpSize)
     * Real mode and V8086 mode are easy.  The only snag seems to be that
     * CS.limit doesn't change and the limit check is done against the current
     * limit.
        Assert(enmOpSize == IEMMODE_16BIT || enmOpSize == IEMMODE_32BIT);
        /* Check stack first - may #SS(0). */
        rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmOpSize == IEMMODE_32BIT ? 6 : 4,
        /* Check the target address range. */
        /* Everything is fine, push the return address. */
        rcStrict = iemMemStackPushCommitSpecial(pIemCpu, pvRet, uNewRsp);
        /* Branch. */
        /** @todo Does REM reset the accessed bit here too? (See on jmp far16
         *        after disabling PE.)  Check with VT-x and AMD-V. */
    AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
 * Implements retf.
 * @param enmEffOpSize The effective operand size.
 * @param cbPop The amount of arguments to pop from the stack.
IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
     * Real mode and V8086 mode are easy.
    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
    rcStrict = iemMemStackPopBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 8 : 4,
    /** @todo check how this is supposed to work if sp=0xfffe. */
    /* Check the limit of the new EIP. */
    /** @todo Intel pseudo code only does the limit check for 16-bit
     *        operands, AMD does not make any distinction.  What is right? */
        return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
    /* Commit the operation. */
    rcStrict = iemMemStackPopCommitSpecial(pIemCpu, pu16Frame, uNewRsp);
    /** @todo do we load attribs and limit as well? */
 * Implements retn.
 * We're doing this in C because of the \#GP that might be raised if the popped
 * program counter is out of bounds.
 * @param enmEffOpSize The effective operand size.
 * @param cbPop The amount of arguments to pop from the stack.
IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
    /* Fetch the new RIP from the stack. */
        rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
        rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
        rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
    /* Check the new RIP before loading it. */
    /** @todo Should test this as the intel+amd pseudo code doesn't mention half
     *        of it.  The canonical test is performed here and for call. */
        Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->csHid.u32Limit));
        return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
        Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
    /* Commit it. */
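/*
 * Sketch of the two checks the retn fragments above apply to the popped
 * return address before committing it: a CS.limit check for 16/32-bit
 * operand sizes and a canonical-address check for 64-bit.  X86_IS_CANONICAL
 * is assumed to be the usual canonical test macro.
 */
static bool iemSketchRetnTargetOk(PCCPUMCTX pCtx, IEMMODE enmEffOpSize, uint64_t uNewRip)
{
    if (enmEffOpSize != IEMMODE_64BIT)
        return uNewRip <= pCtx->csHid.u32Limit; /* out of bounds -> #GP */
    return X86_IS_CANONICAL(uNewRip);           /* non-canonical -> #GP */
}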
 * Implements leave.
 * We're doing this in C because messing with the stack registers is annoying
 * since they depend on SS attributes.
 * @param enmEffOpSize The effective operand size.
IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
    /* Calculate the intermediate RSP from RBP and the stack attributes. */
    /** @todo Check that LEAVE actually preserves the high EBP bits. */
    /* Pop RBP according to the operand size. */
        rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
        rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
        rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
    /* Commit it. */
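/*
 * Tiny sketch of the 16-bit LEAVE sequence the fragments above implement,
 * with the stack read abstracted away as uPoppedBp: SP is first restored
 * from BP, then BP is popped from the (new) stack top.
 */
static void iemSketchLeave16(PCPUMCTX pCtx, uint16_t uPoppedBp)
{
    pCtx->sp  = pCtx->bp;   /* tear down the frame */
    pCtx->bp  = uPoppedBp;  /* value read at the new SP */
    pCtx->sp += 2;          /* account for the pop */
}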
 * Implements int3 and int XX.
 * @param u8Int The interrupt vector number.
 * @param fIsBpInstr Whether it's the breakpoint instruction.
IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
                         (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
 * Implements iret for real mode and V8086 mode.
 * @param enmEffOpSize The effective operand size.
IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
     * iret throws an exception if VME isn't enabled.
     * Do the stack bits, but don't commit RSP before everything checks
     * out right.
    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
        rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
        uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
                   | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
                   | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
        uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
        rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
        uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
                   | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
        uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
        /** @todo The intel pseudo code does not indicate what happens to
         *        reserved flags.  We just ignore them. */
    /** @todo Check how this is supposed to work if sp=0xfffe. */
     * Check the limit of the new EIP.
    /** @todo Only the AMD pseudo code checks the limit here, what's
     *        right? */
        return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
     * V8086 checks and flag adjustments
        /* Preserve IOPL and clear RF. */
        /* Move IF to VIF, clear RF and preserve IF and IOPL. */
        uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
        uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
     * Commit the operation.
    rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
    /** @todo do we load attribs and limit as well? */
 * Implements iret for protected mode.
 * @param enmEffOpSize The effective operand size.
IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
     * Nested task return.
        AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
     * Normal return.
     * Do the stack bits, but don't commit RSP before everything checks
     * out right.
    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
        rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
        rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
    rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
     * What are we returning to?
        /* V8086 mode! */
        AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
         * Protected mode.
        /* Read the CS descriptor. */
            Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCS, uNewEip));
        rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCS);
            Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCS, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
        /* Must be a code descriptor. */
            Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCS, uNewEip, DescCS.Legacy.Gen.u4Type));
            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
        if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
            Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCS, uNewEip, DescCS.Legacy.Gen.u4Type));
            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
        /* Privilege checks. */
            Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCS, uNewEip, pIemCpu->uCpl));
            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
        if (   (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
            && (uNewCS & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
            Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCS, uNewEip, DescCS.Legacy.Gen.u2Dpl));
            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCS);
        /* Present? */
            Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCS, uNewEip));
            return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCS);
            cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
         * Return to outer level?
            rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
            rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
            rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
            /* Read the SS descriptor. */
                Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCS, uNewEip, uNewSS, uNewESP));
            rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
                Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
                     uNewCS, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
            /* Privilege checks. */
            if ((uNewSS & X86_SEL_RPL) != (uNewCS & X86_SEL_RPL))
                Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCS, uNewEip, uNewSS, uNewESP));
                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
            if (DescSS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
                Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
                     uNewCS, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
            /* Must be a writeable data segment descriptor. */
                Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
                     uNewCS, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
            if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
                Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
                     uNewCS, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
            /* Present? */
                Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCS, uNewEip, uNewSS, uNewESP));
                return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
                cbLimitSS = (cbLimitSS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
            /* Check EIP. */
                Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
                return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCS);
             * Commit the changes, marking CS and SS accessed first since
             * that may fail.
            if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
                rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
            if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
                rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
            pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
            pCtx->ssHid.Attr.u = X86DESC_GET_HID_ATTR(DescSS.Legacy);
            uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
                                 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
                fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
                fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
            else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
            iemHlpAdjustSelectorForNewCpl(uNewCS & X86_SEL_RPL, &pCtx->ds, &pCtx->dsHid);
            iemHlpAdjustSelectorForNewCpl(uNewCS & X86_SEL_RPL, &pCtx->es, &pCtx->esHid);
            iemHlpAdjustSelectorForNewCpl(uNewCS & X86_SEL_RPL, &pCtx->fs, &pCtx->fsHid);
            iemHlpAdjustSelectorForNewCpl(uNewCS & X86_SEL_RPL, &pCtx->gs, &pCtx->gsHid);
            /* Done! */
         * Return to the same level.
            /* Check EIP. */
                Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCS, uNewEip, cbLimitCS));
                return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCS);
             * Commit the changes, marking CS first since it may fail.
            if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
                rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCS);
            pCtx->csHid.Attr.u = X86DESC_GET_HID_ATTR(DescCS.Legacy);
            uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
                                 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
                fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
                fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
            else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
            /* Done! */
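/*
 * Sketch of how the fEFlagsMask fragments above fit together (the guarding
 * if-statements were dropped from this excerpt, so this is an assumed
 * reconstruction): the set of EFLAGS bits IRET may restore grows with the
 * operand size and with the privilege of the returning code.
 */
static uint32_t iemSketchIretProtEFlagsMask(IEMMODE enmEffOpSize, uint8_t uCpl, uint8_t uIopl)
{
    uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
                         | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
    if (enmEffOpSize != IEMMODE_16BIT)
        fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;    /* only reachable by a 32-bit pop */
    if (uCpl == 0)
        fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
    else if (uCpl <= uIopl)
        fEFlagsMask |= X86_EFL_IF;
    return fEFlagsMask;
}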
 * Implements iret for long mode.
 * @param enmEffOpSize The effective operand size.
IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
    //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    //VBOXSTRICTRC rcStrict;
    //uint64_t uNewRsp;
    NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
 * Implements iret.
 * @param enmEffOpSize The effective operand size.
IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
     * Call a mode specific worker.
        return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
        return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
    return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
 * @param iSegReg The segment register number (valid).
 * @param uSel The new selector value.
IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
    /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
    PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
    Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
     * Real mode and V8086 mode are easy.
        /** @todo Does the CPU actually load limits and attributes in the
         *        real/V8086 mode segment load case?  It doesn't for CS in far
         *        jumps... Affects unreal mode. */
     * Protected mode.
     * Check if it's a null segment selector value first, that's OK for DS, ES,
     * FS and GS.  If not null, then we have to load and parse the descriptor.
            || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
            Log(("load sreg %#x -> invalid stack selector, #GP(0)\n", uSel));
        /* In 64-bit kernel mode, the stack can be 0 because of the way
           interrupts are dispatched when in kernel ctx.  Just load the
           selector value into the register and leave the hidden bits
           as is. */
        /** @todo figure out what this actually does; it works.  Needs
         *        testcase! */
        /* Fetch the descriptor. */
        VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
        /* Check GPs first. */
            Log(("load sreg %d, %#x - system selector (type %#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
        if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
                || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
                Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
                Log(("load sreg SS, %#x - RPL and CPL (%d) differ -> #GP\n", uSel, pIemCpu->uCpl));
                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
                Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differ -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
            if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
                Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
            if (   (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
#if 0 /* this is what intel says. */
                Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
                     iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
                return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
#else /* this is what makes more sense. */
                if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
                    Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
                         iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
                    return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
                    Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
                         iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
                    return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
        /* Is it there? */
            Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
            return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
        /* The base and limit. */
            cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
         * Ok, everything checked out fine.  Now set the accessed bit before
         * committing the result into the registers.
        if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
            rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
        /* commit */
        pHid->Attr.u = (Desc.Legacy.u >> (16+16+8)) & UINT32_C(0xf0ff); /** @todo do we have a define for 0xf0ff? */
        /** @todo check if the hidden bits are loaded correctly for 64-bit
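/*
 * Note on the 0xf0ff mask used above (explanatory only; no such define is
 * shown in this excerpt): after shifting the descriptor right by 40 bits,
 * bits 0-7 hold the access byte (type, S, DPL, P) and bits 12-15 hold the
 * AVL/L/D-B/G flags, while bits 8-11 would be segment limit 19:16.  The
 * limit bits do not belong in the hidden attribute field, so the mask drops
 * them.
 */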
 * Implements 'mov SReg, r/m'.
 * @param iSegReg The segment register number (valid).
 * @param uSel The new selector value.
IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
        EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
return rcStrict;
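/*
 * The EMSetInhibitInterruptsPC call above is expected to be guarded so that
 * it only fires for SS loads, since only 'mov ss'/'pop ss' delay interrupts
 * until after the following instruction.  A hedged sketch of that guard
 * (illustration only, not the dropped original code):
 */
#if 0
    if (rcStrict == VINF_SUCCESS && iSegReg == X86_SREG_SS)
        EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
#endif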
switch (enmEffOpSize)
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
return rcStrict;
switch (enmEffOpSize)
case IEMMODE_16BIT:
case IEMMODE_32BIT:
case IEMMODE_64BIT:
return rcStrict;
VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
return rcStrict;
VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
return rcStrict;
return VINF_SUCCESS;
return rcStrict;
return VINF_SUCCESS;
return rcStrict;
if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
void *pvDesc;
return rcStrict;
return rcStrict;
return VINF_SUCCESS;
switch (iCrReg)
AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
return VINF_SUCCESS;
int rc;
switch (iCrReg)
Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
return rcStrict;
switch (iDrReg)
return VINF_SUCCESS;
switch (iDrReg)
return VINF_SUCCESS;
return VINF_SUCCESS;
return rc;
#ifdef IEM_VERIFICATION_MODE
return VINF_SUCCESS;
return VINF_SUCCESS;
return rcStrict;
switch (cbReg)
return rcStrict;
switch (cbReg)
return rc;
return VINF_SUCCESS;
return VINF_SUCCESS;
return VINF_EM_HALT;
CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
return VINF_SUCCESS;
return VINF_SUCCESS;
return VINF_SUCCESS;
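/*
 * The string instruction bodies are generated from a single template; the
 * repeated inclusions below presumably instantiate it once per operand/
 * address size combination (the size #defines surrounding each include are
 * not part of this excerpt).
 */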
#include "IEMAllCImplStrInstr.cpp.h"
#include "IEMAllCImplStrInstr.cpp.h"
#include "IEMAllCImplStrInstr.cpp.h"
#include "IEMAllCImplStrInstr.cpp.h"
#include "IEMAllCImplStrInstr.cpp.h"
#include "IEMAllCImplStrInstr.cpp.h"
#include "IEMAllCImplStrInstr.cpp.h"
#include "IEMAllCImplStrInstr.cpp.h"
#include "IEMAllCImplStrInstr.cpp.h"
#include "IEMAllCImplStrInstr.cpp.h"
#include "IEMAllCImplStrInstr.cpp.h"
return VINF_SUCCESS;
return VERR_IEM_INSTR_NOT_IMPLEMENTED;
return VERR_IEM_INSTR_NOT_IMPLEMENTED;