1N/A; IEM - Instruction Implementation in Assembly.
1N/A; Copyright (C) 2011 Oracle Corporation
1N/A; This file is part of VirtualBox Open Source Edition (OSE), as
1N/A; you can redistribute it and/or modify it under the terms of the GNU
1N/A; General Public License (GPL) as published by the Free Software
1N/A; Foundation, in version 2 as it comes in the "COPYING" file of the
1N/A; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
1N/A; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
1N/A;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
1N/A;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
1N/A;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
1N/A; Defined Constants And Macros ;
1N/A;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
1N/A; RET XX / RET wrapper for fastcall.
1N/A%macro RET_FASTCALL 1
1N/A %ifdef RT_OS_WINDOWS
1N/A; NAME for fastcall functions.
1N/A;; @todo 'global @fastcall@12' is still broken in yasm and requires dollar
1N/A; escaping (or whatever the dollar is good for here). Thus the ugly
1N/A%define NAME_FASTCALL(a_Name, a_cbArgs, a_Dollar) NAME(a_Name)
1N/A %ifdef RT_OS_WINDOWS
1N/A %undef NAME_FASTCALL
1N/A %define NAME_FASTCALL(a_Name, a_cbArgs, a_Prefix) a_Prefix %+ a_Name %+ @ %+ a_cbArgs
1N/A; BEGINPROC for fastcall functions.
1N/A; @param 1 The function name (C).
1N/A; @param 2 The argument size on x86.
1N/A%macro BEGINPROC_FASTCALL 2
1N/A %ifdef ASM_FORMAT_PE
1N/A export %1=NAME_FASTCALL(%1,%2,$@)
1N/A %ifdef ASM_FORMAT_OMF
1N/A export NAME(%1) NAME_FASTCALL(%1,%2,$@)
1N/A %ifndef ASM_FORMAT_BIN
1N/A global NAME_FASTCALL(%1,%2,$@)
1N/ANAME_FASTCALL(%1,%2,@):
1N/A; We employ some macro assembly here to hide the calling convention differences.
1N/A %macro PROLOGUE_1_ARGS 0
1N/A %macro EPILOGUE_1_ARGS 1
1N/A %macro PROLOGUE_2_ARGS 0
1N/A %macro EPILOGUE_2_ARGS 1
1N/A %macro PROLOGUE_3_ARGS 0
1N/A %macro EPILOGUE_3_ARGS 1
1N/A %macro PROLOGUE_4_ARGS 0
1N/A %macro EPILOGUE_4_ARGS 1
1N/A %ifdef ASM_CALL64_GCC
1N/A %ifdef ASM_CALL64_MSC
mov ebx, [esp + 12 + 4 + 0]
mov esi, [esp + 12 + 4 + 4]
; Load the relevant flags from [%1] if there are undefined flags (%3).
; @remarks Clobbers T0, stack. Changes EFLAGS.
; @param A2 The register pointing to the flags.
; @param 1 The parameter (A0..A3) pointing to the eflags.
; @param 2 The set of modified flags.
; @param 3 The set of undefined flags.
%macro IEM_MAYBE_LOAD_FLAGS 3
pushf ; store current flags
mov T0_32, [%1] ; load the guest flags
and dword [xSP], ~(%2 | %3) ; mask out the modified and undefined flags
and T0_32, (%2 | %3) ; select the modified and undefined flags.
or [xSP], T0 ; merge guest flags with host flags.
popf ; load the mixed flags.
; @remarks Clobbers T0, T1, stack.
; @param 1 The register pointing to the EFLAGS.
; @param 2 The mask of modified flags to save.
; @param 3 The mask of undefined flags to (maybe) save.
and T0_32, ~(%2 | %3) ; clear the modified & undefined flags.
and T1_32, (%2 | %3) ; select the modified and undefined flags.
or T0_32, T1_32 ; combine the flags.
mov [%1], T0_32 ; save the flags.
; Macro for implementing a binary operator.
; This will generate code for the 8, 16, 32 and 64 bit accesses with locked
; variants, except on 32-bit system where the 64-bit accesses requires hand coding.
; All the functions takes a pointer to the destination memory operand in A0,
; the source register operand in A1 and a pointer to eflags in A2.
; @param 1 The instruction mnemonic.
; @param 2 Non-zero if there should be a locked version.
; @param 3 The modified flags.
; @param 4 The undefined flags.
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u8, 12
IEM_MAYBE_LOAD_FLAGS A2, %3, %4
IEM_SAVE_FLAGS A2, %3, %4
ENDPROC iemAImpl_ %+ %1 %+ _u8
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16, 12
IEM_MAYBE_LOAD_FLAGS A2, %3, %4
IEM_SAVE_FLAGS A2, %3, %4
ENDPROC iemAImpl_ %+ %1 %+ _u16
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32, 12
IEM_MAYBE_LOAD_FLAGS A2, %3, %4
IEM_SAVE_FLAGS A2, %3, %4
ENDPROC iemAImpl_ %+ %1 %+ _u32
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 16
IEM_MAYBE_LOAD_FLAGS A2, %3, %4
IEM_SAVE_FLAGS A2, %3, %4
ENDPROC iemAImpl_ %+ %1 %+ _u64
%else ; stub it for now - later, replace with hand coded stuff.
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 16
ENDPROC iemAImpl_ %+ %1 %+ _u64
%if %2 != 0 ; locked versions requested?
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u8_locked, 12
IEM_MAYBE_LOAD_FLAGS A2, %3, %4
IEM_SAVE_FLAGS A2, %3, %4
ENDPROC iemAImpl_ %+ %1 %+ _u8_locked
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16_locked, 12
IEM_MAYBE_LOAD_FLAGS A2, %3, %4
IEM_SAVE_FLAGS A2, %3, %4
ENDPROC iemAImpl_ %+ %1 %+ _u16_locked
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32_locked, 12
IEM_MAYBE_LOAD_FLAGS A2, %3, %4
lock %1 dword [A0], A1_32
IEM_SAVE_FLAGS A2, %3, %4
ENDPROC iemAImpl_ %+ %1 %+ _u32_locked
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64_locked, 16
IEM_MAYBE_LOAD_FLAGS A2, %3, %4
IEM_SAVE_FLAGS A2, %3, %4
ENDPROC iemAImpl_ %+ %1 %+ _u64_locked
%else ; stub it for now - later, replace with hand coded stuff.
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64_locked, 16
ENDPROC iemAImpl_ %+ %1 %+ _u64_locked
; instr,lock,modified-flags.
IEMIMPL_BIN_OP add, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
IEMIMPL_BIN_OP adc, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
IEMIMPL_BIN_OP sub, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
IEMIMPL_BIN_OP sbb, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
IEMIMPL_BIN_OP or, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF,
IEMIMPL_BIN_OP xor, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF,
IEMIMPL_BIN_OP and, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF,
IEMIMPL_BIN_OP cmp, 0, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
IEMIMPL_BIN_OP test, 0, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF,
; Macro for implementing a bit operator.
; This will generate code for the 16, 32 and 64 bit accesses with locked
; variants, except on 32-bit system where the 64-bit accesses requires hand coding.
; All the functions takes a pointer to the destination memory operand in A0,
; the source register operand in A1 and a pointer to eflags in A2.
; @param 1 The instruction mnemonic.
; @param 2 Non-zero if there should be a locked version.
; @param 3 The modified flags.
; @param 4 The undefined flags.
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16, 12
IEM_MAYBE_LOAD_FLAGS A2, %3, %4
IEM_SAVE_FLAGS A2, %3, %4
ENDPROC iemAImpl_ %+ %1 %+ _u16
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32, 12
IEM_MAYBE_LOAD_FLAGS A2, %3, %4
IEM_SAVE_FLAGS A2, %3, %4
ENDPROC iemAImpl_ %+ %1 %+ _u32
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 16
IEM_MAYBE_LOAD_FLAGS A2, %3, %4
IEM_SAVE_FLAGS A2, %3, %4
ENDPROC iemAImpl_ %+ %1 %+ _u64
%else ; stub it for now - later, replace with hand coded stuff.
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 16
ENDPROC iemAImpl_ %+ %1 %+ _u64
%if %2 != 0 ; locked versions requested?
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16_locked, 12
IEM_MAYBE_LOAD_FLAGS A2, %3, %4
IEM_SAVE_FLAGS A2, %3, %4
ENDPROC iemAImpl_ %+ %1 %+ _u16_locked
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32_locked, 12
IEM_MAYBE_LOAD_FLAGS A2, %3, %4
lock %1 dword [A0], A1_32
IEM_SAVE_FLAGS A2, %3, %4
ENDPROC iemAImpl_ %+ %1 %+ _u32_locked
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64_locked, 16
IEM_MAYBE_LOAD_FLAGS A2, %3, %4
IEM_SAVE_FLAGS A2, %3, %4
ENDPROC iemAImpl_ %+ %1 %+ _u64_locked
%else ; stub it for now - later, replace with hand coded stuff.
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64_locked, 16
ENDPROC iemAImpl_ %+ %1 %+ _u64_locked
IEMIMPL_BIT_OP bt, 0, (X86_EFL_CF), (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF)
IEMIMPL_BIT_OP btc, 1, (X86_EFL_CF), (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF)
IEMIMPL_BIT_OP bts, 1, (X86_EFL_CF), (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF)
IEMIMPL_BIT_OP btr, 1, (X86_EFL_CF), (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF)
; Macro for implementing a bit search operator.
; This will generate code for the 16, 32 and 64 bit accesses, except on 32-bit
; system where the 64-bit accesses requires hand coding.
; All the functions takes a pointer to the destination memory operand in A0,
; the source register operand in A1 and a pointer to eflags in A2.
; @param 1 The instruction mnemonic.
; @param 2 The modified flags.
; @param 3 The undefined flags.
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16, 12
IEM_MAYBE_LOAD_FLAGS A2, %2, %3
IEM_SAVE_FLAGS A2, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u16
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32, 12
IEM_MAYBE_LOAD_FLAGS A2, %2, %3
IEM_SAVE_FLAGS A2, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u32
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 16
IEM_MAYBE_LOAD_FLAGS A2, %2, %3
IEM_SAVE_FLAGS A2, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u64
%else ; stub it for now - later, replace with hand coded stuff.
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 16
ENDPROC iemAImpl_ %+ %1 %+ _u64
IEMIMPL_BIT_OP bsf, (X86_EFL_ZF), (X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF)
IEMIMPL_BIT_OP bsr, (X86_EFL_ZF), (X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF)
; IMUL is also a similar but yet different case (no lock, no mem dst).
; The rDX:rAX variant of imul is handled together with mul further down.
BEGINPROC_FASTCALL iemAImpl_imul_two_u16, 12
IEM_MAYBE_LOAD_FLAGS A2, (X86_EFL_OF | X86_EFL_CF), (X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF)
IEM_SAVE_FLAGS A2, (X86_EFL_OF | X86_EFL_CF), (X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF)
ENDPROC iemAImpl_imul_two_u16
BEGINPROC_FASTCALL iemAImpl_imul_two_u32, 12
IEM_MAYBE_LOAD_FLAGS A2, (X86_EFL_OF | X86_EFL_CF), (X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF)
IEM_SAVE_FLAGS A2, (X86_EFL_OF | X86_EFL_CF), (X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF)
ENDPROC iemAImpl_imul_two_u32
BEGINPROC_FASTCALL iemAImpl_imul_two_u64, 16
IEM_MAYBE_LOAD_FLAGS A2, (X86_EFL_OF | X86_EFL_CF), (X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF)
IEM_SAVE_FLAGS A2, (X86_EFL_OF | X86_EFL_CF), (X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF)
int3 ;; @todo implement me
ENDPROC iemAImpl_imul_two_u64
; XCHG for memory operands. This implies locking. No flag changes.
; Each function takes two arguments, first the pointer to the memory,
; then the pointer to the register. They all return void.
BEGINPROC_FASTCALL iemAImpl_xchg_u8, 8
BEGINPROC_FASTCALL iemAImpl_xchg_u16, 8
ENDPROC iemAImpl_xchg_u16
BEGINPROC_FASTCALL iemAImpl_xchg_u32, 8
ENDPROC iemAImpl_xchg_u32
BEGINPROC_FASTCALL iemAImpl_xchg_u64, 8
ENDPROC iemAImpl_xchg_u64
; XADD for memory operands.
; Each function takes three arguments, first the pointer to the
; memory/register, then the pointer to the register, and finally a pointer to
; eflags. They all return void.
BEGINPROC_FASTCALL iemAImpl_xadd_u8, 12
IEM_MAYBE_LOAD_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
IEM_SAVE_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
BEGINPROC_FASTCALL iemAImpl_xadd_u16, 12
IEM_MAYBE_LOAD_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
IEM_SAVE_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
ENDPROC iemAImpl_xadd_u16
BEGINPROC_FASTCALL iemAImpl_xadd_u32, 12
IEM_MAYBE_LOAD_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
IEM_SAVE_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
ENDPROC iemAImpl_xadd_u32
BEGINPROC_FASTCALL iemAImpl_xadd_u64, 12
IEM_MAYBE_LOAD_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
IEM_SAVE_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
ENDPROC iemAImpl_xadd_u64
BEGINPROC_FASTCALL iemAImpl_xadd_u8_locked, 12
IEM_MAYBE_LOAD_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
IEM_SAVE_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
ENDPROC iemAImpl_xadd_u8_locked
BEGINPROC_FASTCALL iemAImpl_xadd_u16_locked, 12
IEM_MAYBE_LOAD_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
IEM_SAVE_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
ENDPROC iemAImpl_xadd_u16_locked
BEGINPROC_FASTCALL iemAImpl_xadd_u32_locked, 12
IEM_MAYBE_LOAD_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
IEM_SAVE_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
ENDPROC iemAImpl_xadd_u32_locked
BEGINPROC_FASTCALL iemAImpl_xadd_u64_locked, 12
IEM_MAYBE_LOAD_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
IEM_SAVE_FLAGS A2, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
ENDPROC iemAImpl_xadd_u64_locked
; Macro for implementing a unary operator.
; This will generate code for the 8, 16, 32 and 64 bit accesses with locked
; variants, except on 32-bit system where the 64-bit accesses requires hand coding.
; All the functions takes a pointer to the destination memory operand in A0 and
; a pointer to eflags in A1 (the unary forms have no source register operand;
; note the generated code below uses A1, not A2, for the eflags pointer).
; @param 1 The instruction mnemonic.
; @param 2 The modified flags.
; @param 3 The undefined flags.
%macro IEMIMPL_UNARY_OP 3
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u8, 8
IEM_MAYBE_LOAD_FLAGS A1, %2, %3
IEM_SAVE_FLAGS A1, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u8
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u8_locked, 8
IEM_MAYBE_LOAD_FLAGS A1, %2, %3
IEM_SAVE_FLAGS A1, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u8_locked
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16, 8
IEM_MAYBE_LOAD_FLAGS A1, %2, %3
IEM_SAVE_FLAGS A1, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u16
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16_locked, 8
IEM_MAYBE_LOAD_FLAGS A1, %2, %3
IEM_SAVE_FLAGS A1, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u16_locked
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32, 8
IEM_MAYBE_LOAD_FLAGS A1, %2, %3
IEM_SAVE_FLAGS A1, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u32
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32_locked, 8
IEM_MAYBE_LOAD_FLAGS A1, %2, %3
IEM_SAVE_FLAGS A1, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u32_locked
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 8
IEM_MAYBE_LOAD_FLAGS A1, %2, %3
IEM_SAVE_FLAGS A1, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u64
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64_locked, 8
IEM_MAYBE_LOAD_FLAGS A1, %2, %3
IEM_SAVE_FLAGS A1, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u64_locked
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 8
ENDPROC iemAImpl_ %+ %1 %+ _u64
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64_locked, 8
ENDPROC iemAImpl_ %+ %1 %+ _u64_locked
IEMIMPL_UNARY_OP inc, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF), 0
IEMIMPL_UNARY_OP dec, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF), 0
IEMIMPL_UNARY_OP neg, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0
IEMIMPL_UNARY_OP not, 0, 0
; Macro for implementing a shift operation.
; This will generate code for the 8, 16, 32 and 64 bit accesses, except on
; 32-bit system where the 64-bit accesses requires hand coding.
; All the functions takes a pointer to the destination memory operand in A0,
; the shift count in A1 and a pointer to eflags in A2.
; @param 1 The instruction mnemonic.
; @param 2 The modified flags.
; @param 3 The undefined flags.
; Makes ASSUMPTIONS about A0, A1 and A2 assignments.
%macro IEMIMPL_SHIFT_OP 3
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u8, 12
IEM_MAYBE_LOAD_FLAGS A2, %2, %3
IEM_SAVE_FLAGS A2, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u8
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16, 12
IEM_MAYBE_LOAD_FLAGS A2, %2, %3
IEM_SAVE_FLAGS A2, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u16
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32, 12
IEM_MAYBE_LOAD_FLAGS A2, %2, %3
IEM_SAVE_FLAGS A2, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u32
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 12
IEM_MAYBE_LOAD_FLAGS A2, %2, %3
IEM_SAVE_FLAGS A2, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u64
%else ; stub it for now - later, replace with hand coded stuff.
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 12
ENDPROC iemAImpl_ %+ %1 %+ _u64
IEMIMPL_SHIFT_OP rol, (X86_EFL_OF | X86_EFL_CF), 0
IEMIMPL_SHIFT_OP ror, (X86_EFL_OF | X86_EFL_CF), 0
IEMIMPL_SHIFT_OP rcl, (X86_EFL_OF | X86_EFL_CF), 0
IEMIMPL_SHIFT_OP rcr, (X86_EFL_OF | X86_EFL_CF), 0
IEMIMPL_SHIFT_OP shl, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), (X86_EFL_AF)
IEMIMPL_SHIFT_OP shr, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), (X86_EFL_AF)
IEMIMPL_SHIFT_OP sar, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), (X86_EFL_AF)
; Macro for implementing a double precision shift operation.
; This will generate code for the 16, 32 and 64 bit accesses, except on
; 32-bit system where the 64-bit accesses requires hand coding.
; The functions takes the destination operand (r/m) in A0, the source (reg) in
; @param 1 The instruction mnemonic.
; @param 2 The modified flags.
; @param 3 The undefined flags.
; Makes ASSUMPTIONS about A0, A1, A2 and A3 assignments.
%macro IEMIMPL_SHIFT_DBL_OP 3
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16, 16
IEM_MAYBE_LOAD_FLAGS A3, %2, %3
IEM_SAVE_FLAGS A3, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u16
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32, 16
IEM_MAYBE_LOAD_FLAGS A3, %2, %3
IEM_SAVE_FLAGS A3, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u32
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 20
IEM_MAYBE_LOAD_FLAGS A3, %2, %3
IEM_SAVE_FLAGS A3, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u64
%else ; stub it for now - later, replace with hand coded stuff.
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 20
ENDPROC iemAImpl_ %+ %1 %+ _u64
IEMIMPL_SHIFT_DBL_OP shld, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), (X86_EFL_AF)
IEMIMPL_SHIFT_DBL_OP shrd, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), (X86_EFL_AF)
; Macro for implementing a multiplication operations.
; This will generate code for the 8, 16, 32 and 64 bit accesses, except on
; 32-bit system where the 64-bit accesses requires hand coding.
; The 8-bit function only operates on AX, so it takes no DX pointer. The other
; functions takes a pointer to rAX in A0, rDX in A1, the operand in A2 and a
; pointer to eflags in A3.
; The functions all return 0 so the caller can be used for div/idiv as well.
; @param 1 The instruction mnemonic.
; @param 2 The modified flags.
; @param 3 The undefined flags.
; Makes ASSUMPTIONS about A0, A1, A2, A3, T0 and T1 assignments.
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u8, 12
IEM_MAYBE_LOAD_FLAGS A2, %2, %3
IEM_SAVE_FLAGS A2, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u8
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16, 16
IEM_MAYBE_LOAD_FLAGS A3, %2, %3
IEM_SAVE_FLAGS A3, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u16
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32, 16
IEM_MAYBE_LOAD_FLAGS A3, %2, %3
IEM_SAVE_FLAGS A3, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u32
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 20
IEM_MAYBE_LOAD_FLAGS A3, %2, %3
IEM_SAVE_FLAGS A3, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u64
%else ; stub it for now - later, replace with hand coded stuff.
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 20
ENDPROC iemAImpl_ %+ %1 %+ _u64
IEMIMPL_MUL_OP mul, (X86_EFL_OF | X86_EFL_CF), (X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF)
IEMIMPL_MUL_OP imul, (X86_EFL_OF | X86_EFL_CF), (X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF)
; Macro for implementing a division operations.
; This will generate code for the 8, 16, 32 and 64 bit accesses, except on
; 32-bit system where the 64-bit accesses requires hand coding.
; The 8-bit function only operates on AX, so it takes no DX pointer. The other
; functions takes a pointer to rAX in A0, rDX in A1, the operand in A2 and a
; pointer to eflags in A3.
; The functions all return 0 on success and -1 if a divide error should be raised.
; @param 1 The instruction mnemonic.
; @param 2 The modified flags.
; @param 3 The undefined flags.
; Makes ASSUMPTIONS about A0, A1, A2, A3, T0 and T1 assignments.
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u8, 12
;; @todo test for overflow
IEM_MAYBE_LOAD_FLAGS A2, %2, %3
IEM_SAVE_FLAGS A2, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u8
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16, 16
;; @todo test for overflow
IEM_MAYBE_LOAD_FLAGS A3, %2, %3
IEM_SAVE_FLAGS A3, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u16
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32, 16
;; @todo test for overflow
IEM_MAYBE_LOAD_FLAGS A3, %2, %3
IEM_SAVE_FLAGS A3, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u32
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 20
;; @todo test for overflow
IEM_MAYBE_LOAD_FLAGS A3, %2, %3
IEM_SAVE_FLAGS A3, %2, %3
ENDPROC iemAImpl_ %+ %1 %+ _u64
%else ; stub it for now - later, replace with hand coded stuff.
BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 20
ENDPROC iemAImpl_ %+ %1 %+ _u64
IEMIMPL_DIV_OP div, 0, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF)
IEMIMPL_DIV_OP idiv, 0, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF)
; BSWAP. No flag changes.
; Each function takes one argument, pointer to the value to bswap
BEGINPROC_FASTCALL iemAImpl_bswap_u16, 4
mov T0_32, [A0] ; just in case any of the upper bits are used.
ENDPROC iemAImpl_bswap_u16
BEGINPROC_FASTCALL iemAImpl_bswap_u32, 4
ENDPROC iemAImpl_bswap_u32
BEGINPROC_FASTCALL iemAImpl_bswap_u64, 4
ENDPROC iemAImpl_bswap_u64
and T0, X86_FCW_MASK_ALL | X86_FCW_PC_MASK | X86_FCW_RC_MASK
; Initialize the FPU for x87 operation, loading the guest's status word.
; @param 1 Expression giving the address of the FXSTATE of the guest.
and T0, X86_FCW_MASK_ALL | X86_FCW_PC_MASK | X86_FCW_RC_MASK
; Need to move this as well somewhere better?
; Converts a 32-bit floating point value to a 80-bit one (fpu register).
; @param A0 FPU context (fxsave).
; @param A1 Pointer to a IEMFPURESULT for the output.
; @param A2 The 32-bit floating point value to convert.
BEGINPROC_FASTCALL iemAImpl_fpu_r32_to_r80, 12
ENDPROC iemAImpl_fpu_r32_to_r80
; Converts a 64-bit floating point value to a 80-bit one (fpu register).
; @param A0 FPU context (fxsave).
; @param A1 Pointer to a IEMFPURESULT for the output.
; @param A2 Pointer to the 64-bit floating point value to convert.
BEGINPROC_FASTCALL iemAImpl_fpu_r64_to_r80, 12
ENDPROC iemAImpl_fpu_r64_to_r80
; FDIV with 64-bit floating point value.
; @param A0 FPU context (fxsave).
; @param A1 Pointer to a IEMFPURESULT for the output.
; @param A2 Pointer to the 80-bit dividend.
; @param A3 Pointer to the 64-bit divisor.
BEGINPROC_FASTCALL iemAImpl_fpu_fdiv_r80_by_r64, 16
ENDPROC iemAImpl_fpu_fdiv_r80_by_r64