/* op_helper.c revision c0b1058291d8c255ca10f16942043b5f527efc65 */
/*
* i386 helpers
*
* Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
/*
* Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
* other than GPL or LGPL is available it will apply instead, Oracle elects to use only
* the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
* a choice of LGPL license versions is made available with the language indicating
* that LGPLv2 or any later version may be used, or where a choice of which version
* of the LGPL is applied is otherwise unspecified.
*/
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"
#include "ioport.h"
#ifdef VBOX
# include "qemu-common.h"
# include <math.h>
# include "tcg.h"
#endif /* VBOX */
//#define DEBUG_PCALL
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
        log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif
#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
};
/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
8, 9,10,11,12,13,14,15,
16, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9,10,11,12,13,14,
};
/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
8, 0, 1, 2, 3, 4, 5, 6,
7, 8, 0, 1, 2, 3, 4, 5,
6, 7, 8, 0, 1, 2, 3, 4,
};
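/* A minimal illustration (not the path used by the generated code) of how
   these modulo tables are meant to be consumed: RCL on an 8-bit operand
   rotates the 9-bit quantity CF:val, so the count is first reduced mod 9
   via rclb_table; rclw_table plays the same role for 16-bit RCL (mod 17). */
#if 0 /* illustration only */
static inline uint8_t rclb_sketch(uint8_t val, int count, int *cf)
{
    int c = rclb_table[count & 0x1f]; /* count % 9 */
    if (c) {
        int res = (val << c) | (*cf << (c - 1));
        if (c > 1)
            res |= val >> (9 - c);
        *cf = (val >> (8 - c)) & 1; /* bit rotated out becomes the new CF */
        val = res & 0xff;
    }
    return val;
}
#endif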
static const CPU86_LDouble f15rk[7] =
{
0.00000000000000000000L,
1.00000000000000000000L,
3.14159265358979323851L, /*pi*/
0.30102999566398119523L, /*lg2*/
0.69314718055994530943L, /*ln2*/
1.44269504088896340739L, /*l2e*/
3.32192809488736234781L, /*l2t*/
};
/* broken thread support */
static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}
void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}
target_ulong helper_read_eflags(void)
{
    uint32_t eflags = helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
#ifdef VBOX
{
unsigned int new_eflags = t0;
/* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
/* if TF will be set -> #GP */
|| (new_eflags & TF_MASK)) {
} else {
if (new_eflags & IF_MASK) {
} else {
}
}
}
{
else
/* According to AMD manual, should be read with IOPL == 3 */
/* We only use helper_read_eflags_vme() in 16-bit mode */
return eflags & 0xffff;
}
void helper_dump_state()
{
    LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
            (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
            (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
    LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
            (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
            (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
}
/**
* Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
* returns the updated e2.
*
* @returns e2 with A set.
* @param e2 The 2nd selector DWORD.
*/
{
e2 |= DESC_A_MASK;
return e2;
}
#endif /* VBOX */
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;
#ifdef VBOX
/* Trying to load a selector with CPL=1? */
/** @todo this is a hack to correct the incorrect checking order for pending interrupts in the patm iret replacement code (corrected in the ring-1 version) */
/** @todo in theory the iret could fault and we'd still need this. */
if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0) && !EMIsRawRing1Enabled(env->pVM))
{
}
#endif /* VBOX */
    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
static inline void load_seg_cache_raw_dt(SegmentCache *sc,
                                         uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
#ifdef VBOX
    sc->newselector = 0;
#endif
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
#ifdef VBOX
    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
#else  /* VBOX */
    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff, 0);
#endif /* VBOX */
}
{
#ifndef VBOX
#else
#endif
#if 0
{
int i;
}
printf("\n");
}
#endif
if (shift == 0) {
} else {
}
}
/* XXX: merge with load_seg() */
{
#ifdef VBOX
/* Trying to load a selector with CPL=1? */
{
}
#endif /* VBOX */
if ((selector & 0xfffc) != 0) {
if (!(e2 & DESC_S_MASK))
if (!(e2 & DESC_CS_MASK))
/* XXX: is it correct ? */
/* SS must be writable data */
} else {
/* not readable code */
/* if data or non-conforming code, check the rights */
}
}
if (!(e2 & DESC_P_MASK))
e2);
} else {
#ifdef VBOX
# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
0, 0, 0);
# endif
#endif /* VBOX */
}
}
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
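/* Bookkeeping differs by source (Intel SDM task-switch rules): JMP and
   IRET clear the outgoing TSS's busy bit while CALL leaves it set and
   records a back link; CALL also sets NT in the new EFLAGS, and IRET
   clears NT in the old ones - see the NT_MASK and DESC_TSS_BUSY_MASK
   manipulation inside switch_tss() below. */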
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
{
#ifndef VBOX
int index;
#else
unsigned int index;
#endif
/* if task gate, we read the TSS segment and we load it */
if (type == 5) {
if (!(e2 & DESC_P_MASK))
if (tss_selector & 4)
if (e2 & DESC_S_MASK)
}
if (!(e2 & DESC_P_MASK))
if (type & 8)
tss_limit_max = 103;
else
tss_limit_max = 43;
if ((tss_selector & 4) != 0 ||
if (old_type & 8)
old_tss_limit_max = 103;
else
old_tss_limit_max = 43;
/* read all the registers from the new TSS */
if (type & 8) {
/* 32 bit */
for(i = 0; i < 8; i++)
for(i = 0; i < 6; i++)
} else {
/* 16 bit */
new_cr3 = 0;
for(i = 0; i < 8; i++)
for(i = 0; i < 4; i++)
new_trap = 0;
}
/* NOTE: we must avoid memory exceptions during the task switch,
so we make dummy accesses before */
/* XXX: it can still fail in some cases, so a bigger hack is
   necessary to validate the TLB after having done the accesses */
/* clear busy bit (it is restartable) */
e2 &= ~DESC_TSS_BUSY_MASK;
}
old_eflags = compute_eflags();
if (source == SWITCH_TSS_IRET)
old_eflags &= ~NT_MASK;
/* save the current state in the old TSS */
if (type & 8) {
/* 32 bit */
for(i = 0; i < 6; i++)
#ifdef VBOX
/* Must store the ldt as it gets reloaded and might have been changed. */
#endif
printf("TSS 32 bits switch\n");
#endif
} else {
/* 16 bit */
for(i = 0; i < 4; i++)
#ifdef VBOX
/* Must store the ldt as it gets reloaded and might have been changed. */
#endif
}
/* now if an exception occurs, it will occur in the next task
   context */
if (source == SWITCH_TSS_CALL) {
new_eflags |= NT_MASK;
}
/* set busy bit */
e2 |= DESC_TSS_BUSY_MASK;
}
/* set the new CPU state */
/* from this point, any exception which occurs can give problems */
#ifdef VBOX
#endif
}
/* load all registers without an exception, then reload them with
possible exception */
if (!(type & 8))
eflags_mask &= 0xffff;
/* XXX: what to do in 16 bit case ? */
if (new_eflags & VM_MASK) {
for(i = 0; i < 6; i++)
load_seg_vm(i, new_segs[i]);
/* in vm86, CPL is always 3 */
} else {
/* CPL is set to the RPL of CS */
/* first just selectors as the rest may trigger exceptions */
for(i = 0; i < 6; i++)
}
#ifdef VBOX
#endif
/* load the LDT */
if (new_ldt & 4)
if ((new_ldt & 0xfffc) != 0) {
if (!(e2 & DESC_P_MASK))
}
/* load the segments */
if (!(new_eflags & VM_MASK)) {
}
/* check that EIP is in the CS segment limits */
/* XXX: different exception if CALL ? */
}
#ifndef CONFIG_USER_ONLY
/* reset local breakpoints */
for (i = 0; i < 4; i++) {
hw_breakpoint_remove(env, i);
}
}
#endif
}
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
#ifndef VBOX
    int io_offset, val, mask;
#else
    int val, mask;
    unsigned int io_offset;
#endif /* VBOX */
    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
#ifdef VBOX
/* Keep in sync with gen_check_external_event() */
void helper_check_external_event()
{
{
}
}
{
}
#endif /* VBOX */
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}
void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}
void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
void helper_outb(uint32_t port, uint32_t data)
{
#ifndef VBOX
    cpu_outb(port, data & 0xff);
#else
    cpu_outb(env, port, data & 0xff);
#endif
}
target_ulong helper_inb(uint32_t port)
{
#ifndef VBOX
    return cpu_inb(port);
#else
    return cpu_inb(env, port);
#endif
}
void helper_outw(uint32_t port, uint32_t data)
{
#ifndef VBOX
    cpu_outw(port, data & 0xffff);
#else
    cpu_outw(env, port, data & 0xffff);
#endif
}
target_ulong helper_inw(uint32_t port)
{
#ifndef VBOX
    return cpu_inw(port);
#else
    return cpu_inw(env, port);
#endif
}
void helper_outl(uint32_t port, uint32_t data)
{
#ifndef VBOX
    cpu_outl(port, data);
#else
    cpu_outl(env, port, data);
#endif
}
target_ulong helper_inl(uint32_t port)
{
#ifndef VBOX
    return cpu_inl(port);
#else
    return cpu_inl(env, port);
#endif
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
if (e2 & DESC_B_MASK)
return 0xffffffff;
else
return 0xffff;
}
static int exception_has_error_code(int intno)
{
switch(intno) {
case 8:
case 10:
case 11:
case 12:
case 13:
case 14:
case 17:
return 1;
}
return 0;
}
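/* Illustration only (hypothetical caller): the predicate above lets the
   interrupt delivery paths decide whether a code must be pushed, e.g.
       if (!is_int && exception_has_error_code(intno))
           PUSHL(ssp, esp, sp_mask, error_code);
   software INT n never pushes an error code. */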
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
/* protected mode interrupt */
{
#ifdef VBOX
#endif
has_error_code = 0;
if (is_int)
else
#ifndef VBOX
#else
#endif
/* check gate type */
switch(type) {
case 5: /* task gate */
#ifdef VBOX
/* check privilege if software int */
#endif
/* must do that check here to return the correct error code */
if (!(e2 & DESC_P_MASK))
if (has_error_code) {
int type;
/* push the error code */
mask = 0xffffffff;
else
mask = 0xffff;
if (shift)
else
}
return;
case 6: /* 286 interrupt gate */
case 7: /* 286 trap gate */
case 14: /* 386 interrupt gate */
case 15: /* 386 trap gate */
break;
default:
break;
}
/* check privilege if software int */
/* check valid bit */
if (!(e2 & DESC_P_MASK))
if ((selector & 0xfffc) == 0)
if (!(e2 & DESC_P_MASK))
/* to inner privilege */
if ((ss & 0xfffc) == 0)
if (!(ss_e2 & DESC_S_MASK) ||
(ss_e2 & DESC_CS_MASK) ||
!(ss_e2 & DESC_W_MASK))
if (!(ss_e2 & DESC_P_MASK))
#ifdef VBOX /* See page 3-477 of 253666.pdf */
#else
#endif
new_stack = 1;
#endif
/* to same privilege */
new_stack = 0;
} else {
new_stack = 0; /* avoid warning */
sp_mask = 0; /* avoid warning */
ssp = 0; /* avoid warning */
esp = 0; /* avoid warning */
}
#if 0
/* XXX: check that enough room is available */
push_size += 8;
#endif
if (shift == 1) {
if (new_stack) {
}
}
if (has_error_code) {
}
} else {
if (new_stack) {
}
}
if (has_error_code) {
}
}
if (new_stack) {
}
}
e2);
/* interrupt gate clear IF mask */
if ((type & 1) == 0) {
}
#ifndef VBOX
#else
/*
* gets confused by seemingly changed EFLAGS. See #3491 and
* public bug #2341.
*/
#endif
}
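/* For reference, the frame built above for a 32-bit gate (shift == 1),
   from higher to lower addresses: [old SS, old ESP] only when switching
   to an inner stack, then EFLAGS, CS, EIP, and finally the error code
   when present; 16-bit gates push the same sequence in word-sized slots. */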
#ifdef VBOX
/* check if VME interrupt redirection is enabled in TSS */
{
unsigned int io_offset, intredir_offset;
/* TSS must be a valid 32 bit one */
goto fail;
/* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
/* the virtual interrupt redirection bitmap is located below the io bitmap */
goto fail;
/* bit set means no redirection. */
return false;
}
return true;
fail:
return true;
}
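/* Sketch of the elided bitmap test (assumed layout, per the comments
   above): the 256-bit interrupt redirection bitmap sits immediately below
   the I/O bitmap in the TSS, one bit per vector, so conceptually:
       intredir_offset = io_offset - 0x20;
       if (ldub_kernel(env->tr.base + intredir_offset + (intno / 8))
               & (1 << (intno % 8)))
           return false;    (bit set: no redirection)
 */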
/* V86 mode software interrupt with CR4.VME=1 */
{
int selector;
if (!is_vme_irq_redirected(intno))
{
if (iopl == 3)
{
return;
}
else
}
/* virtual mode idt is at linear address 0 */
old_eflags = compute_eflags();
if (iopl < 3)
{
/* copy VIF into IF and set IOPL to 3 */
old_eflags |= IF_MASK;
else
old_eflags &= ~IF_MASK;
}
/* XXX: use SS segment size ? */
/* update processor state */
if (iopl < 3)
else
}
#endif /* VBOX */
#ifdef TARGET_X86_64
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
static inline target_ulong get_rsp_from_tss(int level)
{
int index;
#if 0
#endif
}
/* 64 bit interrupt */
{
int has_error_code, new_stack;
#ifdef VBOX
#endif
has_error_code = 0;
if (is_int)
else
/* check gate type */
switch(type) {
case 14: /* 386 interrupt gate */
case 15: /* 386 trap gate */
break;
default:
break;
}
/* check privilege if software int */
/* check valid bit */
if (!(e2 & DESC_P_MASK))
if ((selector & 0xfffc) == 0)
if (!(e2 & DESC_P_MASK))
/* to inner privilege */
if (ist != 0)
else
ss = 0;
new_stack = 1;
/* to same privilege */
new_stack = 0;
if (ist != 0)
else
} else {
new_stack = 0; /* avoid warning */
esp = 0; /* avoid warning */
}
if (has_error_code) {
}
if (new_stack) {
#ifndef VBOX
#else
#endif
}
e2);
/* interrupt gate clear IF mask */
if ((type & 1) == 0) {
}
#ifndef VBOX
#else /* VBOX */
/*
* gets confused by seemingly changed EFLAGS. See #3491 and
* public bug #2341.
*/
#endif /* VBOX */
}
#endif
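/* Note (AMD64 semantics): in the 64-bit path above, a non-zero `ist`
   field in the gate unconditionally selects one of the seven Interrupt
   Stack Table slots of the 64-bit TSS, whereas RSP0..RSP2 are consulted
   only on a privilege change. */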
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
}
#else
void helper_syscall(int next_eip_addend)
{
int selector;
}
int code64;
cpu_x86_set_cpl(env, 0);
0, 0xffffffff,
0, 0xffffffff,
if (code64)
else
} else {
cpu_x86_set_cpl(env, 0);
0, 0xffffffff,
0, 0xffffffff,
}
}
#endif
#endif
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
}
}
if (dflag == 2) {
0, 0xffffffff,
} else {
0, 0xffffffff,
}
0, 0xffffffff,
} else {
0, 0xffffffff,
0, 0xffffffff,
}
}
#endif
#ifdef VBOX
/**
* Checks and processes external VMM events.
* Called by op_check_external_event() when any of the flags is set and can be serviced.
*/
void helper_external_event(void)
{
# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
# ifdef RT_ARCH_AMD64
# else
# endif
# endif
/* Keep in sync with flags checked by gen_check_external_event() */
{
}
{
}
{
}
{
}
{
}
}
/* helper for recording call instruction addresses for later scanning */
void helper_record_call()
{
}
#endif /* VBOX */
/* real mode interrupt */
unsigned int next_eip)
{
int selector;
/* real mode (simpler !) */
#ifndef VBOX
#else
#endif
if (is_int)
else
/* XXX: use SS segment size ? */
/* update processor state */
}
/* fake user mode interrupt */
{
shift = 4;
} else {
shift = 3;
}
/* check privilege if software int */
/* Since we emulate only user space, we cannot do more than
exiting the emulation with the suitable exception and error
code */
if (is_int)
}
#if !defined(CONFIG_USER_ONLY)
{
if (!(event_inj & SVM_EVTINJ_VALID)) {
int type;
if (is_int)
else
}
}
}
#endif
/*
* Begin execution of an interruption. is_int is TRUE if coming from
* the int instruction. next_eip is the EIP value AFTER the interrupt
* instruction. It is only relevant if is_int is TRUE.
*/
{
if (qemu_loglevel_mask(CPU_LOG_INT)) {
static int count;
qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
if (intno == 0x0e) {
} else {
}
qemu_log("\n");
#if 0
{
int i;
qemu_log(" code=");
for(i = 0; i < 16; i++) {
}
qemu_log("\n");
}
#endif
count++;
}
}
#ifdef VBOX
if (is_int) {
RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
} else {
RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
}
}
#endif
#if !defined(CONFIG_USER_ONLY)
#endif
#ifdef TARGET_X86_64
} else
#endif
{
#ifdef VBOX
/* int xx *, v86 code and VME enabled? */
&& is_int
&& !is_hw
)
else
#endif /* VBOX */
}
} else {
#if !defined(CONFIG_USER_ONLY)
#endif
}
#if !defined(CONFIG_USER_ONLY)
}
#endif
}
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);
/*
* Check nested exceptions and change to double or triple fault if
* needed. It should only be called, if this is not an interrupt.
* Returns the new exception number.
*/
{
int second_contributory = intno == 0 ||
#if !defined(CONFIG_USER_ONLY)
# ifndef VBOX
# else
# endif
return EXCP_HLT;
}
#endif
if ((first_contributory && second_contributory)
intno = EXCP08_DBLE;
*error_code = 0;
}
(intno == EXCP08_DBLE))
return intno;
}
/*
* Signal an interruption. It is executed in the main CPU loop.
* is_int is TRUE if coming from the int instruction. next_eip is the
* EIP value AFTER the interrupt instruction. It is only relevant if
* is_int is TRUE.
*/
int next_eip_addend)
{
Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
#endif
if (!is_int) {
} else {
}
}
/* shortcuts to generate exceptions */
{
}
void raise_exception(int exception_index)
{
raise_interrupt(exception_index, 0, 0, 0);
}
{
}
/* SMM support */
#if defined(CONFIG_USER_ONLY)
void do_smm_enter(void)
{
}
void helper_rsm(void)
{
}
#else
#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
void do_smm_enter(void)
{
int i, offset;
#ifdef TARGET_X86_64
for(i = 0; i < 6; i++) {
}
for(i = 8; i < 16; i++)
#else
for(i = 0; i < 6; i++) {
if (i < 3)
else
}
#endif
/* init SMM cpu state */
#ifdef TARGET_X86_64
cpu_load_efer(env, 0);
#endif
0xffffffff, 0);
cpu_x86_update_cr4(env, 0);
}
void helper_rsm(void)
{
#ifdef VBOX
#else /* !VBOX */
int i, offset;
#ifdef TARGET_X86_64
for(i = 0; i < 6; i++) {
}
#ifdef VBOX
#endif
#ifdef VBOX
#endif
for(i = 8; i < 16; i++)
if (val & 0x20000) {
}
#else
#ifdef VBOX
#endif
#ifdef VBOX
#endif
for(i = 0; i < 6; i++) {
if (i < 3)
else
}
if (val & 0x20000) {
}
#endif
#endif /* !VBOX */
}
#endif /* !CONFIG_USER_ONLY */
/* division, flags are undefined */
{
if (den == 0) {
}
if (q > 0xff)
q &= 0xff;
}
{
if (den == 0) {
}
if (q != (int8_t)q)
q &= 0xff;
}
{
if (den == 0) {
}
if (q > 0xffff)
q &= 0xffff;
}
{
if (den == 0) {
}
if (q != (int16_t)q)
q &= 0xffff;
}
{
unsigned int den, r;
if (den == 0) {
}
if (q > 0xffffffff)
}
{
int den, r;
if (den == 0) {
}
if (q != (int32_t)q)
}
/* bcd */
/* XXX: exception */
void helper_aam(int base)
{
}
void helper_aad(int base)
{
}
void helper_aaa(void)
{
int icarry;
int eflags;
} else {
al &= 0x0f;
}
}
void helper_aas(void)
{
int icarry;
int eflags;
} else {
al &= 0x0f;
}
}
void helper_daa(void)
{
int eflags;
eflags = 0;
}
}
/* well, speed is not an issue here, so we compute the flags by hand */
}
void helper_das(void)
{
int eflags;
eflags = 0;
}
}
/* well, speed is not an issue here, so we compute the flags by hand */
}
void helper_into(int next_eip_addend)
{
int eflags;
}
}
{
uint64_t d;
int eflags;
} else {
/* always do the store */
}
}
#ifdef TARGET_X86_64
{
int eflags;
if ((a0 & 0xf) != 0)
} else {
/* always do the store */
}
}
#endif
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
#endif
}
void helper_cpuid(void)
{
}
{
if (data32) {
/* 32 bit */
esp -= 4;
while (--level) {
esp -= 4;
ebp -= 4;
}
esp -= 4;
} else {
/* 16 bit */
esp -= 2;
while (--level) {
esp -= 2;
ebp -= 2;
}
esp -= 2;
}
}
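/* The stores elided from the loops above mirror upstream QEMU (assumed):
   every iteration copies one enclosing frame pointer, e.g. for data32
       stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
   and the trailing esp adjustment reserves the slot for the new frame
   pointer, so ENTER with level N replicates N-1 outer frame links. The
   same applies to the 64-bit variant below. */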
#ifdef TARGET_X86_64
{
if (data64) {
/* 64 bit */
esp -= 8;
while (--level) {
esp -= 8;
ebp -= 8;
}
esp -= 8;
} else {
/* 16 bit */
esp -= 2;
while (--level) {
esp -= 2;
ebp -= 2;
}
esp -= 2;
}
}
#endif
void helper_lldt(int selector)
{
#ifndef VBOX
int index, entry_limit;
#else
unsigned int index, entry_limit;
#endif
#ifdef VBOX
Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
(RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
#endif
selector &= 0xffff;
if ((selector & 0xfffc) == 0) {
/* XXX: NULL selector case: invalid LDT */
#ifdef VBOX
#endif
} else {
if (selector & 0x4)
#ifdef TARGET_X86_64
entry_limit = 15;
else
#endif
entry_limit = 7;
if (!(e2 & DESC_P_MASK))
#ifdef TARGET_X86_64
} else
#endif
{
}
}
#ifdef VBOX
Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
#endif
}
void helper_ltr(int selector)
{
#ifndef VBOX
#else
unsigned int index;
int type, entry_limit;
#endif
#ifdef VBOX
Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
#endif
selector &= 0xffff;
if ((selector & 0xfffc) == 0) {
/* NULL selector case: invalid TR */
#ifdef VBOX
#endif
} else {
if (selector & 0x4)
#ifdef TARGET_X86_64
entry_limit = 15;
else
#endif
entry_limit = 7;
if ((e2 & DESC_S_MASK) ||
if (!(e2 & DESC_P_MASK))
#ifdef TARGET_X86_64
} else
#endif
{
}
e2 |= DESC_TSS_BUSY_MASK;
}
#ifdef VBOX
Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
#endif
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
{
#ifndef VBOX
int index;
#else
unsigned int index;
#endif
selector &= 0xffff;
#ifdef VBOX
/* Trying to load a selector with CPL=1? */
{
}
#endif /* VBOX */
if ((selector & 0xfffc) == 0) {
/* null selector case */
#ifndef VBOX
#ifdef TARGET_X86_64
#endif
)
#else
} else {
}
#endif
} else {
if (selector & 0x4)
else
if (!(e2 & DESC_S_MASK))
/* must be writable segment */
} else {
/* must be readable segment */
/* if not conforming code, test rights */
}
}
if (!(e2 & DESC_P_MASK)) {
else
}
/* set the access bit if not already set */
if (!(e2 & DESC_A_MASK)) {
e2 |= DESC_A_MASK;
}
e2);
#if 0
qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
#endif
}
}
/* protected mode jump */
int next_eip_addend)
{
#ifdef VBOX /** @todo Why do we do this? */
#endif
if ((new_cs & 0xfffc) == 0)
if (e2 & DESC_S_MASK) {
if (!(e2 & DESC_CS_MASK))
if (e2 & DESC_C_MASK) {
/* conforming code segment */
} else {
/* non conforming code segment */
}
if (!(e2 & DESC_P_MASK))
} else {
/* jump to call or task gate */
switch(type) {
case 1: /* 286 TSS */
case 9: /* 386 TSS */
case 5: /* task gate */
break;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
if (!(e2 & DESC_P_MASK))
if (type == 12)
/* must be code segment */
(DESC_S_MASK | DESC_CS_MASK)))
if (!(e2 & DESC_P_MASK))
#ifdef VBOX /* See page 3-514 of 253666.pdf */
#else
#endif
break;
default:
break;
}
}
}
/* real mode call */
{
int new_eip;
if (shift) {
} else {
}
}
/* protected mode call */
int shift, int next_eip_addend)
{
int new_stack, i;
#ifdef VBOX /** @todo Why do we do this? */
#endif
if ((new_cs & 0xfffc) == 0)
if (e2 & DESC_S_MASK) {
if (!(e2 & DESC_CS_MASK))
if (e2 & DESC_C_MASK) {
/* conforming code segment */
} else {
/* non conforming code segment */
}
if (!(e2 & DESC_P_MASK))
#ifdef TARGET_X86_64
/* XXX: check 16/32 bit cases in long mode */
if (shift == 2) {
/* 64 bit case */
/* from this point, not restartable */
} else
#endif
{
if (shift) {
} else {
}
/* from this point, not restartable */
}
} else {
/* check gate type */
switch(type) {
case 1: /* available 286 TSS */
case 9: /* available 386 TSS */
case 5: /* task gate */
return;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
break;
default:
break;
}
/* check valid bit */
if (!(e2 & DESC_P_MASK))
if ((selector & 0xfffc) == 0)
if (!(e2 & DESC_P_MASK))
/* to inner privilege */
if ((ss & 0xfffc) == 0)
if (!(ss_e2 & DESC_S_MASK) ||
(ss_e2 & DESC_CS_MASK) ||
!(ss_e2 & DESC_W_MASK))
if (!(ss_e2 & DESC_P_MASK))
#ifdef VBOX /* See page 3-99 of 253666.pdf */
#else
#endif
// push_size = ((param_count * 2) + 8) << shift;
if (shift) {
for(i = param_count - 1; i >= 0; i--) {
}
} else {
for(i = param_count - 1; i >= 0; i--) {
}
}
new_stack = 1;
} else {
/* to same privilege */
// push_size = (4 << shift);
new_stack = 0;
}
if (shift) {
} else {
}
/* from this point, not restartable */
if (new_stack) {
ssp,
ss_e2);
}
e2);
}
}
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
int eflags_mask;
#ifdef VBOX
bool fVME = false;
#endif /* VBOX */
if (shift == 1) {
/* 32 bits */
new_cs &= 0xffff;
} else {
/* 16 bits */
}
#ifdef VBOX
{
fVME = true;
/* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
/* if TF will be set -> #GP */
|| (new_eflags & TF_MASK))
}
#endif /* VBOX */
#ifdef VBOX
if (fVME)
else
#endif
else
if (shift == 0)
eflags_mask &= 0xffff;
#ifdef VBOX
if (fVME)
{
if (new_eflags & IF_MASK)
else
}
#endif /* VBOX */
}
{
int dpl;
/* XXX: on x86_64, we do not want to nullify FS and GS because
they may still contain a valid base. I would be interested to
know how a real x86_64 CPU behaves */
return;
/* data or non conforming code segment */
}
}
}
/* protected mode iret */
{
#ifdef VBOX /** @todo Why do we do this? */
#endif
#ifdef TARGET_X86_64
if (shift == 2)
sp_mask = -1;
else
#endif
new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
if (shift == 2) {
new_cs &= 0xffff;
if (is_iret) {
}
} else
#endif
if (shift == 1) {
/* 32 bits */
new_cs &= 0xffff;
if (is_iret) {
#define LOG_GROUP LOG_GROUP_REM
#endif
if (new_eflags & VM_MASK)
goto return_to_vm86;
}
#ifdef VBOX
{
{
}
else
{
/* Ugly assumption: assume a genuine switch to ring-1. */
Log(("Genuine switch to ring-1 (iret)\n"));
}
}
{
}
#endif
} else {
/* 16 bits */
if (is_iret)
}
if ((new_cs & 0xfffc) == 0)
{
Log(("new_cs & 0xfffc) == 0\n"));
#endif
}
{
Log(("load_segment failed\n"));
#endif
}
if (!(e2 & DESC_S_MASK) ||
!(e2 & DESC_CS_MASK))
{
#endif
}
{
#endif
}
if (e2 & DESC_C_MASK) {
{
#endif
}
} else {
{
#endif
}
}
if (!(e2 & DESC_P_MASK))
{
#endif
}
/* return to same privilege level */
#ifdef VBOX
if (!(e2 & DESC_A_MASK))
#endif
e2);
} else {
/* return to different privilege level */
#ifdef TARGET_X86_64
if (shift == 2) {
new_ss &= 0xffff;
} else
#endif
if (shift == 1) {
/* 32 bits */
new_ss &= 0xffff;
} else {
/* 16 bits */
}
if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
/* NULL ss is allowed in long mode if cpl != 3 */
# ifndef VBOX
/* XXX: test CS64 ? */
0, 0xffffffff,
} else
# else /* VBOX */
if (!(e2 & DESC_A_MASK))
0, 0xffffffff,
} else
# endif
#endif
{
#endif
}
} else {
{
#endif
}
{
#endif
}
if (!(ss_e2 & DESC_S_MASK) ||
(ss_e2 & DESC_CS_MASK) ||
!(ss_e2 & DESC_W_MASK))
{
#endif
}
{
#endif
}
if (!(ss_e2 & DESC_P_MASK))
{
#endif
}
#ifdef VBOX
if (!(e2 & DESC_A_MASK))
if (!(ss_e2 & DESC_A_MASK))
#endif
ss_e2);
}
e2);
#ifdef TARGET_X86_64
sp_mask = -1;
else
#endif
/* validate data segments */
}
if (is_iret) {
/* NOTE: 'cpl' is the _old_ CPL */
if (cpl == 0)
#ifdef VBOX
#else
eflags_mask |= IOPL_MASK;
#endif
eflags_mask |= IF_MASK;
if (shift == 0)
eflags_mask &= 0xffff;
}
return;
/* modify processor state */
}
{
int tss_selector, type;
#ifdef VBOX
#endif
/* specific case for TSS */
#ifdef TARGET_X86_64
{
Log(("eflags.NT=1 on iret in long mode\n"));
#endif
}
#endif
if (tss_selector & 4)
/* NOTE: we check both segment and busy TSS */
if (type != 3)
} else {
}
}
{
}
void helper_sysenter(void)
{
if (env->sysenter_cs == 0) {
}
cpu_x86_set_cpl(env, 0);
#ifdef TARGET_X86_64
0, 0xffffffff,
} else
#endif
{
0, 0xffffffff,
}
0, 0xffffffff,
}
void helper_sysexit(int dflag)
{
int cpl;
}
#ifdef TARGET_X86_64
if (dflag == 2) {
0, 0xffffffff,
0, 0xffffffff,
} else
#endif
{
0, 0xffffffff,
0, 0xffffffff,
}
}
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
return 0;
}
void helper_write_crN(int reg, target_ulong t0)
{
}
void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
{
switch(reg) {
default:
break;
case 8:
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
} else {
}
break;
}
return val;
}
{
switch(reg) {
case 0:
break;
case 3:
break;
case 4:
break;
case 8:
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
}
break;
default:
break;
}
}
{
int i;
if (reg < 4) {
# ifndef VBOX
} else if (reg == 7) {
# else
# endif
for (i = 0; i < 4; i++)
hw_breakpoint_remove(env, i);
for (i = 0; i < 4; i++)
hw_breakpoint_insert(env, i);
} else
# ifndef VBOX
# else
# endif
}
#endif
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}
void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}
void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
void helper_rdtsc(void)
{
}
}
void helper_rdtscp(void)
{
helper_rdtsc();
#ifndef VBOX
#else /* VBOX */
else
ECX = 0;
#endif /* VBOX */
}
void helper_rdpmc(void)
{
#ifdef VBOX
/* If X86_CR4_PCE is *not* set, then CPL must be zero. */
}
/* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
EAX = 0;
EDX = 0;
#else /* !VBOX */
}
/* currently unimplemented */
#endif /* !VBOX */
}
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}
void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
case MSR_IA32_SYSENTER_CS:
break;
case MSR_IA32_SYSENTER_ESP:
break;
case MSR_IA32_SYSENTER_EIP:
break;
case MSR_IA32_APICBASE:
# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
# endif
break;
case MSR_EFER:
{
update_mask = 0;
(val & update_mask));
}
break;
case MSR_STAR:
break;
case MSR_PAT:
break;
case MSR_VM_HSAVE_PA:
break;
#ifdef TARGET_X86_64
case MSR_LSTAR:
break;
case MSR_CSTAR:
break;
case MSR_FMASK:
break;
case MSR_FSBASE:
break;
case MSR_GSBASE:
break;
case MSR_KERNELGSBASE:
break;
#endif
# ifndef VBOX
case MSR_MTRRphysBase(0):
case MSR_MTRRphysBase(1):
case MSR_MTRRphysBase(2):
case MSR_MTRRphysBase(3):
case MSR_MTRRphysBase(4):
case MSR_MTRRphysBase(5):
case MSR_MTRRphysBase(6):
case MSR_MTRRphysBase(7):
break;
case MSR_MTRRphysMask(0):
case MSR_MTRRphysMask(1):
case MSR_MTRRphysMask(2):
case MSR_MTRRphysMask(3):
case MSR_MTRRphysMask(4):
case MSR_MTRRphysMask(5):
case MSR_MTRRphysMask(6):
case MSR_MTRRphysMask(7):
break;
case MSR_MTRRfix64K_00000:
break;
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
break;
case MSR_MTRRfix4K_C0000:
case MSR_MTRRfix4K_C8000:
case MSR_MTRRfix4K_D0000:
case MSR_MTRRfix4K_D8000:
case MSR_MTRRfix4K_E0000:
case MSR_MTRRfix4K_E8000:
case MSR_MTRRfix4K_F0000:
case MSR_MTRRfix4K_F8000:
break;
case MSR_MTRRdefType:
break;
case MSR_MCG_STATUS:
break;
case MSR_MCG_CTL:
break;
case MSR_TSC_AUX:
break;
# endif /* !VBOX */
default:
# ifndef VBOX
if ((offset & 0x3) != 0
break;
}
/* XXX: exception ? */
# endif
break;
}
# ifdef VBOX
/* call CPUM. */
{
/** @todo be a brave man and raise a \#GP(0) here as we should... */
}
# endif
}
void helper_rdmsr(void)
{
case MSR_IA32_SYSENTER_CS:
break;
case MSR_IA32_SYSENTER_ESP:
break;
case MSR_IA32_SYSENTER_EIP:
break;
case MSR_IA32_APICBASE:
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
break;
case MSR_EFER:
break;
case MSR_STAR:
break;
case MSR_PAT:
break;
case MSR_VM_HSAVE_PA:
break;
# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
case MSR_IA32_PERF_STATUS:
/* tsc_increment_by_tick */
val = 1000ULL;
/* CPU multiplier */
break;
# endif /* !VBOX */
#ifdef TARGET_X86_64
case MSR_LSTAR:
break;
case MSR_CSTAR:
break;
case MSR_FMASK:
break;
case MSR_FSBASE:
break;
case MSR_GSBASE:
break;
case MSR_KERNELGSBASE:
break;
# ifndef VBOX
case MSR_TSC_AUX:
break;
# endif /*!VBOX*/
#endif
# ifndef VBOX
case MSR_MTRRphysBase(0):
case MSR_MTRRphysBase(1):
case MSR_MTRRphysBase(2):
case MSR_MTRRphysBase(3):
case MSR_MTRRphysBase(4):
case MSR_MTRRphysBase(5):
case MSR_MTRRphysBase(6):
case MSR_MTRRphysBase(7):
break;
case MSR_MTRRphysMask(0):
case MSR_MTRRphysMask(1):
case MSR_MTRRphysMask(2):
case MSR_MTRRphysMask(3):
case MSR_MTRRphysMask(4):
case MSR_MTRRphysMask(5):
case MSR_MTRRphysMask(6):
case MSR_MTRRphysMask(7):
break;
case MSR_MTRRfix64K_00000:
break;
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
break;
case MSR_MTRRfix4K_C0000:
case MSR_MTRRfix4K_C8000:
case MSR_MTRRfix4K_D0000:
case MSR_MTRRfix4K_D8000:
case MSR_MTRRfix4K_E0000:
case MSR_MTRRfix4K_E8000:
case MSR_MTRRfix4K_F0000:
case MSR_MTRRfix4K_F8000:
break;
case MSR_MTRRdefType:
break;
case MSR_MTRRcap:
else
/* XXX: exception ? */
val = 0;
break;
case MSR_MCG_CAP:
break;
case MSR_MCG_CTL:
else
val = 0;
break;
case MSR_MCG_STATUS:
break;
# endif /* !VBOX */
default:
# ifndef VBOX
break;
}
/* XXX: exception ? */
val = 0;
# else /* VBOX */
{
/** @todo be a brave man and raise a \#GP(0) here as we should... */
val = 0;
}
# endif /* VBOX */
break;
}
# ifdef VBOX_STRICT
val = 0;
AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
}
# endif
}
#endif
{
unsigned int limit;
if ((selector & 0xfffc) == 0)
goto fail;
goto fail;
if (e2 & DESC_S_MASK) {
/* conforming */
} else {
goto fail;
}
} else {
switch(type) {
case 1:
case 2:
case 3:
case 9:
case 11:
break;
default:
goto fail;
}
fail:
return 0;
}
}
return limit;
}
{
if ((selector & 0xfffc) == 0)
goto fail;
goto fail;
if (e2 & DESC_S_MASK) {
/* conforming */
} else {
goto fail;
}
} else {
switch(type) {
case 1:
case 2:
case 3:
case 4:
case 5:
case 9:
case 11:
case 12:
break;
default:
goto fail;
}
fail:
return 0;
}
}
return e2 & 0x00f0ff00;
}
{
if ((selector & 0xfffc) == 0)
goto fail;
goto fail;
if (!(e2 & DESC_S_MASK))
goto fail;
if (e2 & DESC_CS_MASK) {
if (!(e2 & DESC_R_MASK))
goto fail;
if (!(e2 & DESC_C_MASK)) {
goto fail;
}
} else {
fail:
return;
}
}
}
{
if ((selector & 0xfffc) == 0)
goto fail;
goto fail;
if (!(e2 & DESC_S_MASK))
goto fail;
if (e2 & DESC_CS_MASK) {
goto fail;
} else {
goto fail;
if (!(e2 & DESC_W_MASK)) {
fail:
return;
}
}
}
/* x87 FPU helpers */
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}
static CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}
static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
{
union {
float32 f;
uint32_t i;
} u;
u.i = val;
}
{
union {
float64 f;
uint64_t i;
} u;
u.i = val;
}
{
}
{
int new_fpstt;
union {
float32 f;
uint32_t i;
} u;
u.i = val;
}
{
int new_fpstt;
union {
float64 f;
uint64_t i;
} u;
u.i = val;
}
{
int new_fpstt;
}
{
int new_fpstt;
}
#ifndef VBOX
uint32_t helper_fsts_ST0(void)
#else
RTCCUINTREG helper_fsts_ST0(void)
#endif
{
union {
float32 f;
uint32_t i;
} u;
return u.i;
}
uint64_t helper_fstl_ST0(void)
{
union {
float64 f;
uint64_t i;
} u;
return u.i;
}
#ifndef VBOX
int32_t helper_fist_ST0(void)
#else
RTCCINTREG helper_fist_ST0(void)
#endif
{
val = -32768;
return val;
}
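/* 16-bit FIST/FISTP saturates: a result outside [-32768, 32767] (or an
   invalid operand) stores the integer indefinite 0x8000, hence the
   -32768 above; the 32/64-bit and truncating variants below behave
   analogously with their own indefinite values. */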
#ifndef VBOX
int32_t helper_fistl_ST0(void)
#else
RTCCINTREG helper_fistl_ST0(void)
#endif
{
return val;
}
int64_t helper_fistll_ST0(void)
{
return val;
}
#ifndef VBOX
int32_t helper_fistt_ST0(void)
#else
RTCCINTREG helper_fistt_ST0(void)
#endif
{
val = -32768;
return val;
}
#ifndef VBOX
int32_t helper_fisttl_ST0(void)
#else
RTCCINTREG helper_fisttl_ST0(void)
#endif
{
return val;
}
int64_t helper_fisttll_ST0(void)
{
return val;
}
{
int new_fpstt;
}
{
}
void helper_fpush(void)
{
fpush();
}
void helper_fpop(void)
{
fpop();
}
void helper_fdecstp(void)
{
}
void helper_fincstp(void)
{
}
/* FPU move */
void helper_ffree_STN(int st_index)
{
}
void helper_fmov_ST0_FT0(void)
{
}
void helper_fmov_FT0_STN(int st_index)
{
}
void helper_fmov_ST0_STN(int st_index)
{
}
void helper_fmov_STN_ST0(int st_index)
{
}
void helper_fxchg_ST0_STN(int st_index)
{
}
/* FPU operations */
void helper_fcom_ST0_FT0(void)
{
int ret;
}
void helper_fucom_ST0_FT0(void)
{
int ret;
}
void helper_fcomi_ST0_FT0(void)
{
int eflags;
int ret;
}
void helper_fucomi_ST0_FT0(void)
{
int eflags;
int ret;
}
void helper_fadd_ST0_FT0(void)
{
}
void helper_fmul_ST0_FT0(void)
{
}
void helper_fsub_ST0_FT0(void)
{
}
void helper_fsubr_ST0_FT0(void)
{
}
void helper_fdiv_ST0_FT0(void)
{
}
void helper_fdivr_ST0_FT0(void)
{
}
/* fp operations between STN and ST0 */
void helper_fadd_STN_ST0(int st_index)
{
}
void helper_fmul_STN_ST0(int st_index)
{
}
void helper_fsub_STN_ST0(int st_index)
{
}
void helper_fsubr_STN_ST0(int st_index)
{
CPU86_LDouble *p;
*p = ST0 - *p;
}
void helper_fdiv_STN_ST0(int st_index)
{
CPU86_LDouble *p;
*p = helper_fdiv(*p, ST0);
}
void helper_fdivr_STN_ST0(int st_index)
{
CPU86_LDouble *p;
*p = helper_fdiv(ST0, *p);
}
/* misc FPU operations */
void helper_fchs_ST0(void)
{
}
void helper_fabs_ST0(void)
{
}
void helper_fld1_ST0(void)
{
}
void helper_fldl2t_ST0(void)
{
}
void helper_fldl2e_ST0(void)
{
}
void helper_fldpi_ST0(void)
{
}
void helper_fldlg2_ST0(void)
{
}
void helper_fldln2_ST0(void)
{
}
void helper_fldz_ST0(void)
{
}
void helper_fldz_FT0(void)
{
}
#ifndef VBOX
uint32_t helper_fnstsw(void)
#else
RTCCUINTREG helper_fnstsw(void)
#endif
{
}
#ifndef VBOX
uint32_t helper_fnstcw(void)
#else
RTCCUINTREG helper_fnstcw(void)
#endif
{
}
static void update_fp_status(void)
{
int rnd_type;
/* set rounding mode */
default:
case RC_NEAR:
break;
case RC_DOWN:
break;
case RC_UP:
break;
case RC_CHOP:
break;
}
#ifdef FLOATX80
case 0:
rnd_type = 32;
break;
case 2:
rnd_type = 64;
break;
case 3:
default:
rnd_type = 80;
break;
}
#endif
}
{
}
void helper_fclex(void)
{
}
void helper_fwait(void)
{
}
void helper_fninit(void)
{
}
/* BCD ops */
{
unsigned int v;
int i;
val = 0;
for(i = 8; i >= 0; i--) {
}
fpush();
}
{
int v;
if (val < 0) {
} else {
}
if (val == 0)
break;
v = val % 100;
v = ((v / 10) << 4) | (v % 10);
}
}
}
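/* Packed BCD (FBLD/FBSTP) layout for reference: bytes 0..8 hold 18
   decimal digits, two per byte with the low digit in the low nibble;
   byte 9 carries the sign in its most significant bit. */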
void helper_f2xm1(void)
{
}
void helper_fyl2x(void)
{
if (fptemp>0.0){
fpop();
} else {
}
}
void helper_fptan(void)
{
} else {
fpush();
ST0 = 1.0;
/* the above code is for |arg| < 2**52 only */
}
}
void helper_fpatan(void)
{
fpop();
}
void helper_fxtract(void)
{
unsigned int expdif;
/*DP exponent bias*/
fpush();
}
void helper_fprem1(void)
{
int expdif;
signed long long int q;
#else
#endif
return;
}
if (expdif < 0) {
/* optimisation? taken from the AMD docs */
/* ST0 is unchanged */
return;
}
if (expdif < 53) {
/* round dblq towards nearest integer */
/* convert dblq to q by truncating towards zero */
if (dblq < 0.0)
q = (signed long long int)(-dblq);
else
q = (signed long long int)dblq;
/* (C0,C3,C1) <-- (q2,q1,q0) */
} else {
/* fpsrcop = integer obtained by chopping */
}
}
void helper_fprem(void)
{
int expdif;
signed long long int q;
#else
#endif
return;
}
if (expdif < 0) {
/* optimisation? taken from the AMD docs */
/* ST0 is unchanged */
return;
}
if ( expdif < 53 ) {
/* round dblq towards zero */
/* convert dblq to q by truncating towards zero */
if (dblq < 0.0)
q = (signed long long int)(-dblq);
else
q = (signed long long int)dblq;
/* (C0,C3,C1) <-- (q2,q1,q0) */
} else {
/* fpsrcop = integer obtained by chopping */
}
}
void helper_fyl2xp1(void)
{
fpop();
} else {
}
}
void helper_fsqrt(void)
{
if (fptemp<0.0) {
}
}
void helper_fsincos(void)
{
} else {
fpush();
/* the above code is for |arg| < 2**63 only */
}
}
void helper_frndint(void)
{
}
void helper_fscale(void)
{
}
void helper_fsin(void)
{
} else {
/* the above code is for |arg| < 2**53 only */
}
}
void helper_fcos(void)
{
} else {
/* the above code is for |arg| < 2**63 only */
}
}
void helper_fxam_ST0(void)
{
int expdif;
/* XXX: test fptags too */
#ifdef USE_X86LDOUBLE
#else
#endif
else
} else if (expdif == 0) {
else
} else {
}
}
{
fptag = 0;
for (i=7; i>=0; i--) {
fptag <<= 2;
fptag |= 3;
} else {
/* zero */
fptag |= 1;
#ifdef USE_X86LDOUBLE
#endif
) {
/* NaNs, infinity, denormal */
fptag |= 2;
}
}
}
if (data32) {
/* 32 bit */
} else {
/* 16 bit */
}
}
{
if (data32) {
}
else {
}
for(i = 0;i < 8; i++) {
fptag >>= 2;
}
}
{
int i;
for(i = 0;i < 8; i++) {
ptr += 10;
}
/* fninit */
}
{
int i;
for(i = 0;i < 8; i++) {
ptr += 10;
}
}
{
/* The operand must be 16 byte aligned */
if (ptr & 0xf) {
}
fptag = 0;
for(i = 0; i < 8; i++) {
}
#ifdef TARGET_X86_64
if (data64) {
} else
#endif
{
}
for(i = 0;i < 8; i++) {
addr += 16;
}
/* XXX: finish it */
nb_xmm_regs = 16;
else
nb_xmm_regs = 8;
/* Fast FXSAVE leaves out the XMM registers */
for(i = 0; i < nb_xmm_regs; i++) {
addr += 16;
}
}
}
}
{
/* The operand must be 16 byte aligned */
if (ptr & 0xf) {
}
fptag ^= 0xff;
for(i = 0;i < 8; i++) {
}
for(i = 0;i < 8; i++) {
addr += 16;
}
/* XXX: finish it */
//ldl(ptr + 0x1c);
nb_xmm_regs = 16;
else
nb_xmm_regs = 8;
/* Fast FXRESTORE leaves out the XMM registers */
for(i = 0; i < nb_xmm_regs; i++) {
#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
# if 1
# else
/* this works fine on Mac OS X, gcc 4.0.1 */
# endif
#endif
addr += 16;
}
}
}
}
#ifndef USE_X86LDOUBLE
{
int e;
temp.d = f;
/* mantissa */
/* exponent + sign */
*pexp = e;
}
{
int e;
/* XXX: handle overflow ? */
#ifdef __arm__
#else
#endif
return temp.d;
}
#else
{
temp.d = f;
}
{
return temp.d;
}
#endif
#ifdef TARGET_X86_64
//#define DEBUG_MULDIV
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
*plow += a;
/* carry test */
if (*plow < a)
(*phigh)++;
*phigh += b;
}
{
}
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;
    a0 = *plow;
    a1 = *phigh;
if (a1 == 0) {
q = a0 / b;
r = a0 % b;
*plow = q;
*phigh = r;
} else {
if (a1 >= b)
return 1;
/* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
}
#if defined(DEBUG_MULDIV)
printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
#endif
        *plow = a0;
        *phigh = a1;
    }
return 0;
}
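/* The shift-subtract loop above is a textbook restoring 128/64 division,
   one quotient bit per iteration; the early `a1 >= b` check is what
   guarantees the quotient fits in 64 bits, and a non-zero return makes
   the DIV helpers raise the divide-error exception. */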
/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}
{
}
{
}
{
return r0;
}
{
if (t0 == 0) {
}
}
{
if (t0 == 0) {
}
}
#endif
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}
void helper_hlt(int next_eip_addend)
{
EIP += next_eip_addend;
do_hlt();
}
{
#ifdef VBOX
#else /* !VBOX */
#endif /* !VBOX */
/* XXX: store address ? */
}
void helper_mwait(int next_eip_addend)
{
#ifdef VBOX
#else /* !VBOX */
EIP += next_eip_addend;
/* XXX: not complete but not completely erroneous */
/* more than one CPU: do not sleep because another CPU may
wake this one */
} else {
do_hlt();
}
#endif /* !VBOX */
}
void helper_debug(void)
{
}
void helper_reset_rf(void)
{
}
{
}
void helper_raise_exception(int exception_index)
{
}
void helper_cli(void)
{
}
void helper_sti(void)
{
}
#ifdef VBOX
void helper_cli_vme(void)
{
}
void helper_sti_vme(void)
{
/* First check, then change eflags according to the AMD manual */
}
}
#endif /* VBOX */
#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
}
void helper_sti_vm(void)
{
}
}
#endif
void helper_set_inhibit_irq(void)
{
}
void helper_reset_inhibit_irq(void)
{
}
{
v = (int16_t)v;
}
}
{
}
}
static float approx_rsqrt(float a)
{
return 1.0 / sqrt(a);
}
static float approx_rcp(float a)
{
return 1.0 / a;
}
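/* Note: real hardware only guarantees about 12 bits of relative accuracy
   for RSQRTSS/RCPSS, so computing the exact reciprocal in double
   precision here is simpler and, if anything, more accurate than
   hardware; guest code has to tolerate either. */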
#if !defined(CONFIG_USER_ONLY)
#define SHIFT 0
#include "softmmu_template.h"
#define SHIFT 1
#include "softmmu_template.h"
#define SHIFT 2
#include "softmmu_template.h"
#define SHIFT 3
#include "softmmu_template.h"
#endif
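/* Each inclusion above stamps out the softmmu load/store helpers for one
   access size: SHIFT 0/1/2/3 produce the 1-, 2-, 4- and 8-byte variants
   respectively. */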
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
/* This code assumes that a real physical address always fits into a host
   CPU register, which is wrong in general but true for our current use cases. */
{
return remR3PhysReadS8(addr);
}
{
return remR3PhysReadU8(addr);
}
{
}
{
return remR3PhysReadS16(addr);
}
{
return remR3PhysReadU16(addr);
}
{
}
{
return remR3PhysReadS32(addr);
}
{
return remR3PhysReadU32(addr);
}
{
}
{
return remR3PhysReadU64(addr);
}
{
}
#endif /* VBOX */
#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
{
int ret;
/* XXX: hack to restore env in all cases, even if not called from
generated code */
if (ret) {
if (retaddr) {
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
}
}
}
#endif
#ifdef VBOX
/**
* Correctly computes the eflags.
* @returns eflags.
* @param env1 CPU environment.
*/
{
efl = compute_eflags();
return efl;
}
/**
* Reads byte from virtual address in guest memory area.
* XXX: is it working for any addresses? swapped out pages?
* @returns read data byte.
* @param env1 CPU environment.
* @param pvAddr GC Virtual address.
*/
{
return u8;
}
/**
 * Reads 16-bit value from virtual address in guest memory area.
 * XXX: is it working for any addresses? swapped out pages?
 * @returns read data word.
* @param env1 CPU environment.
* @param pvAddr GC Virtual address.
*/
{
return u16;
}
/**
 * Reads 32-bit value from virtual address in guest memory area.
 * XXX: is it working for any addresses? swapped out pages?
 * @returns read data dword.
* @param env1 CPU environment.
* @param pvAddr GC Virtual address.
*/
{
return u32;
}
/**
 * Writes byte to virtual address in guest memory area.
 * XXX: is it working for any addresses? swapped out pages?
* @param env1 CPU environment.
* @param pvAddr GC Virtual address.
* @param val byte value
*/
{
}
{
}
{
}
/**
 * Correctly loads selector into segment register, updating the internal
 * hidden (cached) segment state.
* @param env1 CPU environment.
* @param seg_reg Segment register.
* @param selector Selector to load.
*/
{
#ifdef FORCE_SEGMENT_SYNC
#endif
{
/* Successful sync. */
}
else
{
time critical - let's not do that */
#ifdef FORCE_SEGMENT_SYNC
#endif
{
{
e2);
}
else
/* We used to use tss_load_seg(seg_reg, selector); which, for some reason,
   ignored loading 0 selectors, which in turn led to subtle problems like #3588 */
/* Successful sync. */
}
else
{
/* Postpone sync until the guest uses the selector. */
env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
env1->error_code = 0;
}
#ifdef FORCE_SEGMENT_SYNC
#endif
}
}
{
}
{
int flags;
/* ensures env is loaded! */
/*
* Translate only one instruction.
*/
/* tb_link_phys: */
tb_reset_jump(tb, 0);
/*
* Execute it using emulation
*/
/*
 * EIP remains the same for repeated instructions; no idea why qemu doesn't
 * do a jump inside the generated code. Perhaps not a very safe hack.
*/
{
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
int fake_ret;
#else
#endif
/*
* Exit once we detect an external interrupt and interrupts are enabled
*/
)
{
break;
}
}
}
/*
Assert(tb->tb_next_offset[0] == 0xffff);
Assert(tb->tb_next_offset[1] == 0xffff);
Assert(tb->tb_next[0] == 0xffff);
Assert(tb->tb_next[1] == 0xffff);
Assert(tb->jmp_next[0] == NULL);
Assert(tb->jmp_next[1] == NULL);
Assert(tb->jmp_first == NULL); */
/*
* Execute the next instruction when we encounter instruction fusing.
*/
{
Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
}
return 0;
}
/**
* Correctly loads a new ldtr selector.
*
* @param env1 CPU environment.
* @param selector Selector to load.
*/
{
{
}
else
{
#ifdef VBOX_STRICT
#endif
}
}
{
{
return 0;
}
//raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
if (shift == 0) {
} else {
}
return 1;
}
//*****************************************************************************
// Needs to be at the bottom of the file (overriding macros)
{
#ifdef USE_X86LDOUBLE
return tmp.d;
#else
return *(CPU86_LDouble *)ptr;
#endif
}
{
#ifdef USE_X86LDOUBLE
tmp.d = f;
AssertCompile(sizeof(long double) > 8);
#else
*(CPU86_LDouble *)ptr = f;
#endif
}
//*****************************************************************************
{
{
fptag = 0;
for(i = 0; i < 8; i++) {
}
for(i = 0;i < 8; i++) {
addr += 16;
}
/* XXX: finish it */
for(i = 0; i < nb_xmm_regs; i++) {
#if __GNUC__ < 4
#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
#endif
addr += 16;
}
}
}
else
{
int fptag;
fptag = 0;
for (i=7; i>=0; i--) {
fptag <<= 2;
fptag |= 3;
} else {
/* the FPU automatically computes it */
}
}
for(i = 0;i < 8; i++) {
}
}
}
//*****************************************************************************
//*****************************************************************************
{
int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
{
fptag ^= 0xff;
for(i = 0;i < 8; i++) {
}
for(i = 0;i < 8; i++) {
addr += 16;
}
/* XXX: finish it, endianness */
//ldl(ptr + 0x1c);
for(i = 0; i < nb_xmm_regs; i++) {
#if HC_ARCH_BITS == 32
/* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
#else
#endif
addr += 16;
}
}
}
else
{
int fptag, j;
for(i = 0;i < 8; i++) {
fptag >>= 2;
}
for(i = 0;i < 8; i++) {
}
}
}
//*****************************************************************************
//*****************************************************************************
#endif /* VBOX */
/* Secure Virtual Machine helpers */
#if defined(CONFIG_USER_ONLY)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
{
}
{
}
{
}
#else
const SegmentCache *sc)
{
}
{
unsigned int flags;
}
{
}
{
if (aflag == 2)
else
/* save the current CPU state in the hsave page */
EIP + next_eip_addend);
/* load the interception bitmaps so we do not need to access the
vmcb in svm mode */
env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
/* enable intercepts */
/* clear exit_info_2 so we behave like the real hardware */
if (int_ctl & V_INTR_MASKING_MASK) {
}
/* FIXME: guest state consistency checks */
case TLB_CONTROL_DO_NOTHING:
break;
/* FIXME: this is not 100% correct but should work for now */
break;
}
if (int_ctl & V_IRQ_MASK) {
}
/* maybe we need to inject an event */
if (event_inj & SVM_EVTINJ_VALID) {
/* FIXME: need to implement valid_err */
switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
case SVM_EVTINJ_TYPE_INTR:
env->exception_is_int = 0;
/* XXX: is it always correct ? */
break;
case SVM_EVTINJ_TYPE_NMI:
env->exception_is_int = 0;
break;
case SVM_EVTINJ_TYPE_EXEPT:
env->exception_is_int = 0;
break;
case SVM_EVTINJ_TYPE_SOFT:
break;
}
}
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
if (aflag == 2)
else
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
#ifdef TARGET_X86_64
#endif
}
void helper_vmsave(int aflag)
{
if (aflag == 2)
else
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
#ifdef TARGET_X86_64
#endif
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
/* XXX: not implemented */
}
void helper_invlpga(int aflag)
{
if (aflag == 2)
else
/* XXX: could use the ASID to see if it is needed to do the
flush */
}
{
return;
#ifndef VBOX
switch(type) {
}
break;
}
break;
}
break;
}
break;
}
break;
case SVM_EXIT_MSR:
/* FIXME: this should be read in at vmrun (faster this way?) */
case 0 ... 0x1fff:
break;
case 0xc0000000 ... 0xc0001fff:
t0 %= 8;
break;
case 0xc0010000 ... 0xc0011fff:
t0 %= 8;
break;
default:
t0 = 0;
t1 = 0;
break;
}
}
break;
default:
}
break;
}
#else /* VBOX */
AssertMsgFailed(("We shouldn't be here, HM supported differently!"));
#endif /* VBOX */
}
{
/* FIXME: this should be read in at vmrun (faster this way?) */
/* next EIP */
}
}
}
/* Note: currently only 32 bits of exit_code are used */
{
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
EIP);
} else {
}
/* Save the VM state in the vmcb */
int_ctl |= V_IRQ_MASK;
/* Reload the host state from vm_hsave */
env->intercept_exceptions = 0;
env->tsc_offset = 0;
/* we need to set the efer after the crs so the hidden flags get
set properly */
/* other setups */
cpu_x86_set_cpl(env, 0);
/* FIXME: Resets the current ASID register to zero (host ASID). */
/* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
/* Clears the TSC_OFFSET inside the processor. */
/* If the host is in PAE mode, the processor reloads the host's PDPEs
   from the page table indicated by the host's CR3. If the PDPEs contain
   illegal state, the processor causes a shutdown. */
/* Forces CR0.PE = 1, RFLAGS.VM = 0. */
/* Disables all breakpoints in the host DR7 register. */
/* Checks the reloaded host state for consistency. */
/* If the host's rIP reloaded by #VMEXIT is outside the limit of the
host's code segment or non-canonical (in the case of long mode), a
#GP fault is delivered inside the host.) */
/* remove any pending exception */
env->error_code = 0;
}
#endif
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}
void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}
/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}
#define SHIFT 0
#include "ops_sse.h"
#define SHIFT 1
#include "ops_sse.h"
#define SHIFT 0
#include "helper_template.h"
#define SHIFT 1
#include "helper_template.h"
#define SHIFT 2
#include "helper_template.h"
#ifdef TARGET_X86_64
#define SHIFT 3
#include "helper_template.h"
#endif
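/* Likewise, helper_template.h is instantiated once per operand size
   (SHIFT 0/1/2/3 -> 8/16/32/64-bit); among other things this generates
   the RCL/RCR helpers that consume the rclb_table/rclw_table defined at
   the top of this file. */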
/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;
    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}
target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;
    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}
target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}
static int compute_all_eflags(void)
{
return CC_SRC;
}
static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
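/* Lazy condition codes: the translator does not materialise EFLAGS after
   every instruction. It records the last flag-setting operation in CC_OP
   and its operands/result in CC_SRC/CC_DST, and the compute_all_* and
   compute_c_* families below rebuild the full flag set (or just CF) on
   demand, e.g. conceptually:
       CC_SRC = src1; CC_DST = result; CC_OP = CC_OP_ADDL;
       ...
       eflags = helper_cc_compute_all(CC_OP);  (only when actually needed)
 */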
uint32_t helper_cc_compute_all(int op)
{
switch (op) {
default: /* should never happen */ return 0;
case CC_OP_EFLAGS: return compute_all_eflags();
case CC_OP_MULB: return compute_all_mulb();
case CC_OP_MULW: return compute_all_mulw();
case CC_OP_MULL: return compute_all_mull();
case CC_OP_ADDB: return compute_all_addb();
case CC_OP_ADDW: return compute_all_addw();
case CC_OP_ADDL: return compute_all_addl();
case CC_OP_ADCB: return compute_all_adcb();
case CC_OP_ADCW: return compute_all_adcw();
case CC_OP_ADCL: return compute_all_adcl();
case CC_OP_SUBB: return compute_all_subb();
case CC_OP_SUBW: return compute_all_subw();
case CC_OP_SUBL: return compute_all_subl();
case CC_OP_SBBB: return compute_all_sbbb();
case CC_OP_SBBW: return compute_all_sbbw();
case CC_OP_SBBL: return compute_all_sbbl();
case CC_OP_LOGICB: return compute_all_logicb();
case CC_OP_LOGICW: return compute_all_logicw();
case CC_OP_LOGICL: return compute_all_logicl();
case CC_OP_INCB: return compute_all_incb();
case CC_OP_INCW: return compute_all_incw();
case CC_OP_INCL: return compute_all_incl();
case CC_OP_DECB: return compute_all_decb();
case CC_OP_DECW: return compute_all_decw();
case CC_OP_DECL: return compute_all_decl();
case CC_OP_SHLB: return compute_all_shlb();
case CC_OP_SHLW: return compute_all_shlw();
case CC_OP_SHLL: return compute_all_shll();
case CC_OP_SARB: return compute_all_sarb();
case CC_OP_SARW: return compute_all_sarw();
case CC_OP_SARL: return compute_all_sarl();
#ifdef TARGET_X86_64
case CC_OP_MULQ: return compute_all_mulq();
case CC_OP_ADDQ: return compute_all_addq();
case CC_OP_ADCQ: return compute_all_adcq();
case CC_OP_SUBQ: return compute_all_subq();
case CC_OP_SBBQ: return compute_all_sbbq();
case CC_OP_LOGICQ: return compute_all_logicq();
case CC_OP_INCQ: return compute_all_incq();
case CC_OP_DECQ: return compute_all_decq();
case CC_OP_SHLQ: return compute_all_shlq();
case CC_OP_SARQ: return compute_all_sarq();
#endif
}
}
uint32_t helper_cc_compute_c(int op)
{
switch (op) {
default: /* should never happen */ return 0;
case CC_OP_EFLAGS: return compute_c_eflags();
case CC_OP_MULB: return compute_c_mull();
case CC_OP_MULW: return compute_c_mull();
case CC_OP_MULL: return compute_c_mull();
case CC_OP_ADDB: return compute_c_addb();
case CC_OP_ADDW: return compute_c_addw();
case CC_OP_ADDL: return compute_c_addl();
case CC_OP_ADCB: return compute_c_adcb();
case CC_OP_ADCW: return compute_c_adcw();
case CC_OP_ADCL: return compute_c_adcl();
case CC_OP_SUBB: return compute_c_subb();
case CC_OP_SUBW: return compute_c_subw();
case CC_OP_SUBL: return compute_c_subl();
case CC_OP_SBBB: return compute_c_sbbb();
case CC_OP_SBBW: return compute_c_sbbw();
case CC_OP_SBBL: return compute_c_sbbl();
case CC_OP_LOGICB: return compute_c_logicb();
case CC_OP_LOGICW: return compute_c_logicw();
case CC_OP_LOGICL: return compute_c_logicl();
case CC_OP_INCB: return compute_c_incl();
case CC_OP_INCW: return compute_c_incl();
case CC_OP_INCL: return compute_c_incl();
case CC_OP_DECB: return compute_c_incl();
case CC_OP_DECW: return compute_c_incl();
case CC_OP_DECL: return compute_c_incl();
case CC_OP_SHLB: return compute_c_shlb();
case CC_OP_SHLW: return compute_c_shlw();
case CC_OP_SHLL: return compute_c_shll();
case CC_OP_SARB: return compute_c_sarl();
case CC_OP_SARW: return compute_c_sarl();
case CC_OP_SARL: return compute_c_sarl();
#ifdef TARGET_X86_64
case CC_OP_MULQ: return compute_c_mull();
case CC_OP_ADDQ: return compute_c_addq();
case CC_OP_ADCQ: return compute_c_adcq();
case CC_OP_SUBQ: return compute_c_subq();
case CC_OP_SBBQ: return compute_c_sbbq();
case CC_OP_LOGICQ: return compute_c_logicq();
case CC_OP_INCQ: return compute_c_incl();
case CC_OP_DECQ: return compute_c_incl();
case CC_OP_SHLQ: return compute_c_shlq();
case CC_OP_SARQ: return compute_c_sarl();
#endif
}
}