/* IEMAllCImplStrInstr.cpp.h revision e5517e43d06e9eacd7e70aac52f441ad39d3c81b */
/* $Id$ */
/** @file
* IEM - String Instruction Implementation Code Template.
*/
/*
* Copyright (C) 2011-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Defined Constants And Macros *
*******************************************************************************/
/*
 * Validate OP_SIZE.
 * NOTE(review): upstream this chain also defines per-size helper macros for
 * 8/16/32/64-bit operand sizes; those branches are missing from this view,
 * so only the validation structure is kept.
 */
#if OP_SIZE == 8
#else
# error "Bad OP_SIZE."
#endif

/*
 * ADDR2_TYPE - the unsigned integer type used for address arithmetic.
 * The original chunk defined ADDR2_TYPE three times under the single
 * ADDR_SIZE == 16 branch (a macro redefinition) and left an unmatched
 * #else; restore the intended one-definition-per-branch structure.
 */
#if ADDR_SIZE == 16
# define ADDR2_TYPE uint32_t
#elif ADDR_SIZE == 32
# define ADDR2_TYPE uint32_t
#elif ADDR_SIZE == 64
# define ADDR2_TYPE uint64_t
#else
# error "Bad ADDR_SIZE."
#endif

/*
 * IS_64_BIT_CODE - whether the guest code being emulated is 64-bit.
 * Defined exactly once here (the original chunk had a duplicate definition
 * inside the ADDR_SIZE == 16 branch and a dangling #else/#endif pair).
 */
#if ADDR_SIZE == 64 || OP_SIZE == 64
# define IS_64_BIT_CODE(a_pIemCpu) (true)
#else
# define IS_64_BIT_CODE(a_pIemCpu) (false)
#endif
/**
 * Implements 'REPE CMPS'.
 *
 * Compares [xSI] with ES:[xDI] a page at a time when possible, repeating
 * while xCX != 0 and ZF is set.
 *
 * NOTE(review): this chunk is a truncated extraction -- the IEM_CIMPL_DEF_*
 * signature, local declarations and several statements are missing, leaving
 * dangling condition fragments below.  Only comments are touched here;
 * verify against the upstream file before further edits.
 */
{
/*
 * Setup.
 */
if (uCounterReg == 0)
{
return VINF_SUCCESS;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * The loop.
 */
do
{
/*
 * Do segmentation and virtual page stuff.
 */
if (cLeftSrc1Page > uCounterReg)
if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
&& cbIncr > 0 /** @todo Optimize reverse direction string ops. */
&& ( IS_64_BIT_CODE(pIemCpu)
)
)
{
/* Translate both source pages up front so any #PF is raised before mapping. */
rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * If we can map the page without trouble, do a block processing
 * until the end of the current page.
 */
rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
if (rcStrict == VINF_SUCCESS)
{
rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
if (rcStrict == VINF_SUCCESS)
{
{
/* All matches, only compare the last item to get the right eflags. */
RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
uCounterReg -= cLeftPage;
}
else
{
/* Some mismatch, compare each item (and keep volatile
memory in mind). */
do
{
off++;
/* NOTE(review): dangling loop-condition fragment -- the matching '} while ('
line is missing from this view. */
&& (uEFlags & X86_EFL_ZF));
uCounterReg -= off;
}
/* Update the registers before looping. */
continue;
}
}
}
/*
 * Fallback - slow processing till the end of the current page.
 * In the cross page boundary case we will end up here with cLeftPage
 * as 0, we execute one loop then.
 */
do
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (rcStrict != VINF_SUCCESS)
return rcStrict;
cLeftPage--;
&& (uEFlags & X86_EFL_ZF));
} while ( uCounterReg != 0
&& (uEFlags & X86_EFL_ZF));
/*
 * Done.
 */
return VINF_SUCCESS;
}
/**
 * Implements 'REPNE CMPS'.
 *
 * Same structure as the REPE variant above, but the loop repeats while
 * ZF is CLEAR (mismatch continues the scan).
 *
 * NOTE(review): this chunk is a truncated extraction -- the IEM_CIMPL_DEF_*
 * signature, local declarations and several statements are missing, leaving
 * dangling condition fragments below.  Only comments are touched here;
 * verify against the upstream file before further edits.
 */
{
/*
 * Setup.
 */
if (uCounterReg == 0)
{
return VINF_SUCCESS;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * The loop.
 */
do
{
/*
 * Do segmentation and virtual page stuff.
 */
if (cLeftSrc1Page > uCounterReg)
if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
&& cbIncr > 0 /** @todo Optimize reverse direction string ops. */
&& ( IS_64_BIT_CODE(pIemCpu)
)
)
{
/* Translate both source pages up front so any #PF is raised before mapping. */
rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * If we can map the page without trouble, do a block processing
 * until the end of the current page.
 */
rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
if (rcStrict == VINF_SUCCESS)
{
rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
if (rcStrict == VINF_SUCCESS)
{
{
/* All matches, only compare the last item to get the right eflags. */
RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
uCounterReg -= cLeftPage;
}
else
{
/* Some mismatch, compare each item (and keep volatile
memory in mind). */
do
{
off++;
/* NOTE(review): dangling loop-condition fragment -- the matching '} while ('
line is missing from this view. */
&& !(uEFlags & X86_EFL_ZF));
uCounterReg -= off;
}
/* Update the registers before looping. */
continue;
}
}
}
/*
 * Fallback - slow processing till the end of the current page.
 * In the cross page boundary case we will end up here with cLeftPage
 * as 0, we execute one loop then.
 */
do
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (rcStrict != VINF_SUCCESS)
return rcStrict;
cLeftPage--;
&& !(uEFlags & X86_EFL_ZF));
} while ( uCounterReg != 0
&& !(uEFlags & X86_EFL_ZF));
/*
 * Done.
 */
return VINF_SUCCESS;
}
/**
 * Implements 'REPE SCAS'.
 *
 * Scans ES:[xDI] against xAX/AL, repeating while xCX != 0 and ZF is set.
 *
 * NOTE(review): this chunk is a truncated extraction -- the IEM_CIMPL_DEF_*
 * signature, local declarations and several statements are missing, leaving
 * dangling condition fragments below.  Only comments are touched here;
 * verify against the upstream file before further edits.
 */
{
/*
 * Setup.
 */
if (uCounterReg == 0)
{
return VINF_SUCCESS;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * The loop.
 */
do
{
/*
 * Do segmentation and virtual page stuff.
 */
if (cLeftPage > uCounterReg)
if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
&& cbIncr > 0 /** @todo Implement reverse direction string ops. */
&& ( IS_64_BIT_CODE(pIemCpu)
)
)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * If we can map the page without trouble, do a block processing
 * until the end of the current page.
 */
if (rcStrict == VINF_SUCCESS)
{
/* Search till we find a mismatching item. */
/* NOTE(review): fQuit is read below but its assignment is on lines
missing from this view -- verify against upstream. */
bool fQuit;
uint32_t i = 0;
do
{
/* Update the regs. */
if (fQuit)
break;
/* If unaligned, we drop thru and do the page crossing access
below. Otherwise, do the next page. */
continue;
if (uCounterReg == 0)
break;
cLeftPage = 0;
}
}
/*
 * Fallback - slow processing till the end of the current page.
 * In the cross page boundary case we will end up here with cLeftPage
 * as 0, we execute one loop then.
 */
do
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
cLeftPage--;
&& (uEFlags & X86_EFL_ZF));
} while ( uCounterReg != 0
&& (uEFlags & X86_EFL_ZF));
/*
 * Done.
 */
return VINF_SUCCESS;
}
/**
 * Implements 'REPNE SCAS'.
 *
 * Same structure as the REPE variant above, but repeats while ZF is CLEAR.
 *
 * NOTE(review): this chunk is a truncated extraction -- the IEM_CIMPL_DEF_*
 * signature, local declarations and several statements are missing, leaving
 * dangling condition fragments below.  Only comments are touched here;
 * verify against the upstream file before further edits.
 */
{
/*
 * Setup.
 */
if (uCounterReg == 0)
{
return VINF_SUCCESS;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * The loop.
 */
do
{
/*
 * Do segmentation and virtual page stuff.
 */
if (cLeftPage > uCounterReg)
if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
&& cbIncr > 0 /** @todo Implement reverse direction string ops. */
&& ( IS_64_BIT_CODE(pIemCpu)
)
)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * If we can map the page without trouble, do a block processing
 * until the end of the current page.
 */
if (rcStrict == VINF_SUCCESS)
{
/* Search till we find a mismatching item. */
/* NOTE(review): fQuit is read below but its assignment is on lines
missing from this view -- verify against upstream. */
bool fQuit;
uint32_t i = 0;
do
{
/* Update the regs. */
if (fQuit)
break;
/* If unaligned, we drop thru and do the page crossing access
below. Otherwise, do the next page. */
continue;
if (uCounterReg == 0)
break;
cLeftPage = 0;
}
}
/*
 * Fallback - slow processing till the end of the current page.
 * In the cross page boundary case we will end up here with cLeftPage
 * as 0, we execute one loop then.
 */
do
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
cLeftPage--;
&& !(uEFlags & X86_EFL_ZF));
} while ( uCounterReg != 0
&& !(uEFlags & X86_EFL_ZF));
/*
 * Done.
 */
return VINF_SUCCESS;
}
/**
 * Implements 'REP MOVS'.
 *
 * Copies from [xSI] to ES:[xDI] a page at a time when both pages map
 * cleanly, falling back to per-item access otherwise.
 *
 * NOTE(review): this chunk is a truncated extraction -- the IEM_CIMPL_DEF_*
 * signature, local declarations and several statements are missing.  Only
 * comments are touched here; verify against the upstream file before
 * further edits.
 */
{
/*
 * Setup.
 */
if (uCounterReg == 0)
{
return VINF_SUCCESS;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * Be careful with handle bypassing.
 */
if (pIemCpu->fBypassHandlers)
{
return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
}
/*
 * If we're reading back what we write, we have to let the verification code
 * know about it to prevent a false positive.
 * Note! This doesn't take aliasing or wrapping into account - lazy bird.
 */
#ifdef IEM_VERIFICATION_MODE_FULL
&& (cbIncr > 0
? uSrcAddrReg <= uDstAddrReg
: uDstAddrReg <= uSrcAddrReg
pIemCpu->fOverlappingMovs = true;
#endif
/*
 * The loop.
 */
do
{
/*
 * Do segmentation and virtual page stuff.
 */
if (cLeftSrcPage > uCounterReg)
if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
&& cbIncr > 0 /** @todo Implement reverse direction string ops. */
&& ( IS_64_BIT_CODE(pIemCpu)
)
)
{
/* Translate source (read) and destination (write) pages before mapping. */
rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * If we can map the page without trouble, do a block processing
 * until the end of the current page.
 */
rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
if (rcStrict == VINF_SUCCESS)
{
rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
if (rcStrict == VINF_SUCCESS)
{
/* Perform the operation exactly (don't use memcpy to avoid
having to consider how its implementation would affect
any overlapping source and destination area). */
while (cTodo-- > 0)
/* Update the registers. */
continue;
}
}
}
/*
 * Fallback - slow processing till the end of the current page.
 * In the cross page boundary case we will end up here with cLeftPage
 * as 0, we execute one loop then.
 */
do
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (rcStrict != VINF_SUCCESS)
return rcStrict;
cLeftPage--;
} while (uCounterReg != 0);
/*
 * Done.
 */
return VINF_SUCCESS;
}
/**
 * Implements 'REP STOS'.
 *
 * Fills ES:[xDI] with the accumulator value, a page at a time when the
 * destination page maps cleanly.
 *
 * NOTE(review): this chunk is a truncated extraction -- the IEM_CIMPL_DEF_*
 * signature, local declarations and several statements are missing.  Only
 * comments are touched here; verify against the upstream file before
 * further edits.
 */
{
/*
 * Setup.
 */
if (uCounterReg == 0)
{
return VINF_SUCCESS;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * Be careful with handle bypassing.
 */
/** @todo Permit doing a page if correctly aligned. */
if (pIemCpu->fBypassHandlers)
{
return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
}
/*
 * The loop.
 */
do
{
/*
 * Do segmentation and virtual page stuff.
 */
if (cLeftPage > uCounterReg)
if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
&& cbIncr > 0 /** @todo Implement reverse direction string ops. */
&& ( IS_64_BIT_CODE(pIemCpu)
)
)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * If we can map the page without trouble, do a block processing
 * until the end of the current page.
 */
if (rcStrict == VINF_SUCCESS)
{
/* Update the regs first so we can loop on cLeftPage. */
/* Do the memsetting. */
#if OP_SIZE == 8
/*#elif OP_SIZE == 32
ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
#else
while (cLeftPage-- > 0)
#endif
/* If unaligned, we drop thru and do the page crossing access
below. Otherwise, do the next page. */
continue;
if (uCounterReg == 0)
break;
cLeftPage = 0;
}
}
/*
 * Fallback - slow processing till the end of the current page.
 * In the cross page boundary case we will end up here with cLeftPage
 * as 0, we execute one loop then.
 */
do
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
cLeftPage--;
} while (uCounterReg != 0);
/*
 * Done.
 */
return VINF_SUCCESS;
}
/**
 * Implements 'REP LODS'.
 *
 * Repeatedly loads from [xSI] into the accumulator; in direct-access mode
 * only the final value on each page actually matters.
 *
 * NOTE(review): this chunk is a truncated extraction -- the IEM_CIMPL_DEF_*
 * signature, local declarations and several statements are missing.  Only
 * comments are touched here; verify against the upstream file before
 * further edits.
 */
{
/*
 * Setup.
 */
if (uCounterReg == 0)
{
return VINF_SUCCESS;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * The loop.
 */
do
{
/*
 * Do segmentation and virtual page stuff.
 */
if (cLeftPage > uCounterReg)
if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
&& cbIncr > 0 /** @todo Implement reverse direction string ops. */
&& ( IS_64_BIT_CODE(pIemCpu)
)
)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * If we can map the page without trouble, we can get away with
 * just reading the last value on the page.
 */
if (rcStrict == VINF_SUCCESS)
{
/* Only get the last byte, the rest doesn't matter in direct access mode. */
#if OP_SIZE == 32
#else
#endif
/* If unaligned, we drop thru and do the page crossing access
below. Otherwise, do the next page. */
continue;
if (uCounterReg == 0)
break;
cLeftPage = 0;
}
}
/*
 * Fallback - slow processing till the end of the current page.
 * In the cross page boundary case we will end up here with cLeftPage
 * as 0, we execute one loop then.
 */
do
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
#if OP_SIZE == 32
#else
#endif
cLeftPage--;
if (rcStrict != VINF_SUCCESS)
break;
} while (uCounterReg != 0);
/*
 * Done.
 */
return VINF_SUCCESS;
}
#if OP_SIZE != 64
/**
 * Implements 'INS' (no rep)
 *
 * Reads one item from the I/O port in DX and stores it at ES:[xDI].
 *
 * NOTE(review): this chunk is a truncated extraction -- the IEM_CIMPL_DEF_*
 * signature, local declarations and several statements are missing, leaving
 * orphaned else-branches below.  Only comments are touched here; verify
 * against the upstream file before further edits.
 */
{
/*
 * Be careful with handle bypassing.
 */
if (pIemCpu->fBypassHandlers)
{
return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
}
/*
 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
 * segmentation and finally any #PF due to virtual address translation.
 * ASSUMES nothing is read from the I/O port before traps are taken.
 */
if (!fIoChecked)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
/* Map the destination before touching the port so faults precede the I/O. */
rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
if (IOM_SUCCESS(rcStrict))
{
{
else
}
/* iemMemMap already checked permissions, so this may only be real errors
or access handlers meddling. The access handler case is going to
cause misbehavior if the instruction is re-interpreted or smth. So,
we fail with an internal error here instead. */
else
}
return rcStrict;
}
/**
 * Implements 'REP INS'.
 *
 * Repeated port-to-memory transfer; done as a per-item loop because the
 * current IOM interface has no mapped-buffer string I/O method.
 *
 * NOTE(review): this chunk is a truncated extraction -- the IEM_CIMPL_DEF_*
 * signature, local declarations and several statements are missing, leaving
 * orphaned else-branches below.  Only comments are touched here; verify
 * against the upstream file before further edits.
 */
{
/*
 * Setup.
 */
if (!fIoChecked)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
if (uCounterReg == 0)
{
return VINF_SUCCESS;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * Be careful with handle bypassing.
 */
if (pIemCpu->fBypassHandlers)
{
return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
}
/*
 * The loop.
 */
do
{
/*
 * Do segmentation and virtual page stuff.
 */
if (cLeftPage > uCounterReg)
if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
&& cbIncr > 0 /** @todo Implement reverse direction string ops. */
&& ( IS_64_BIT_CODE(pIemCpu)
)
)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * If we can map the page without trouble, we would've liked to use
 * a string I/O method to do the work, but the current IOM
 * interface doesn't match our current approach. So, do a regular
 * loop instead.
 */
/** @todo Change the I/O manager interface to make use of
 * mapped buffers instead of leaving those bits to the
 * device implementation! */
if (rcStrict == VINF_SUCCESS)
{
{
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
if (IOM_SUCCESS(rcStrict))
{
}
if (rcStrict != VINF_SUCCESS)
{
if (IOM_SUCCESS(rcStrict))
{
if (uCounterReg == 0)
}
return rcStrict;
}
off++;
}
/* If unaligned, we drop thru and do the page crossing access
below. Otherwise, do the next page. */
continue;
if (uCounterReg == 0)
break;
cLeftPage = 0;
}
}
/*
 * Fallback - slow processing till the end of the current page.
 * In the cross page boundary case we will end up here with cLeftPage
 * as 0, we execute one loop then.
 *
 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
 * I/O port, otherwise it wouldn't really be restartable.
 */
/** @todo investigate what the CPU actually does with \#PF/\#GP
 * during INS. */
do
{
rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
if (!IOM_SUCCESS(rcStrict))
return rcStrict;
cLeftPage--;
if (rcStrict != VINF_SUCCESS)
{
if (uCounterReg == 0)
return rcStrict;
}
} while (uCounterReg != 0);
/*
 * Done.
 */
return VINF_SUCCESS;
}
/**
 * Implements 'OUTS' (no rep)
 *
 * Reads one item from iEffSeg:[xSI] and writes it to the I/O port in DX.
 *
 * @param   iEffSeg     The effective source segment register index.
 * @param   fIoChecked  Whether the I/O permission bitmap has already been
 *                      checked by the caller.
 *
 * NOTE(review): several statements of this body are missing from this view
 * (truncated extraction); only comments are touched here.
 */
IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
{
/*
 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
 * segmentation and finally any #PF due to virtual address translation.
 * ASSUMES nothing is read from the I/O port before traps are taken.
 */
if (!fIoChecked)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
if (rcStrict == VINF_SUCCESS)
{
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
if (IOM_SUCCESS(rcStrict))
{
else
if (rcStrict != VINF_SUCCESS)
}
}
return rcStrict;
}
/**
 * Implements 'REP OUTS'.
 *
 * Repeated memory-to-port transfer; done as a per-item loop because the
 * current IOM interface has no mapped-buffer string I/O method.
 *
 * @param   iEffSeg     The effective source segment register index.
 * @param   fIoChecked  Whether the I/O permission bitmap has already been
 *                      checked by the caller.
 *
 * NOTE(review): local declarations and several statements of this body are
 * missing from this view (truncated extraction); only comments are touched
 * here.
 */
IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
{
/*
 * Setup.
 */
if (!fIoChecked)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
}
if (uCounterReg == 0)
{
return VINF_SUCCESS;
}
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * The loop.
 */
do
{
/*
 * Do segmentation and virtual page stuff.
 */
if (cLeftPage > uCounterReg)
if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
&& cbIncr > 0 /** @todo Implement reverse direction string ops. */
&& ( IS_64_BIT_CODE(pIemCpu)
)
)
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
/*
 * If we can map the page without trouble, we would've liked to use
 * a string I/O method to do the work, but the current IOM
 * interface doesn't match our current approach. So, do a regular
 * loop instead.
 */
/** @todo Change the I/O manager interface to make use of
 * mapped buffers instead of leaving those bits to the
 * device implementation? */
if (rcStrict == VINF_SUCCESS)
{
{
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
if (IOM_SUCCESS(rcStrict))
{
}
if (rcStrict != VINF_SUCCESS)
{
if (IOM_SUCCESS(rcStrict))
{
if (uCounterReg == 0)
}
return rcStrict;
}
off++;
}
/* If unaligned, we drop thru and do the page crossing access
below. Otherwise, do the next page. */
continue;
if (uCounterReg == 0)
break;
cLeftPage = 0;
}
}
/*
 * Fallback - slow processing till the end of the current page.
 * In the cross page boundary case we will end up here with cLeftPage
 * as 0, we execute one loop then.
 *
 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
 * I/O port, otherwise it wouldn't really be restartable.
 */
/** @todo investigate what the CPU actually does with \#PF/\#GP
 * during OUTS. */
do
{
if (rcStrict != VINF_SUCCESS)
return rcStrict;
if (!IEM_VERIFICATION_ENABLED(pIemCpu))
else
if (IOM_SUCCESS(rcStrict))
{
cLeftPage--;
}
if (rcStrict != VINF_SUCCESS)
{
if (IOM_SUCCESS(rcStrict))
{
if (uCounterReg == 0)
}
return rcStrict;
}
} while (uCounterReg != 0);
/*
 * Done.
 */
return VINF_SUCCESS;
}
#endif /* OP_SIZE != 64-bit */