helper.c revision 4f655b6c15f33ffa88db4a6627ba4a882c47d0e7
/*
* i386 helpers
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
* other than GPL or LGPL is available it will apply instead, Sun elects to use only
* the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
* a choice of LGPL license versions is made available with the language indicating
* that LGPLv2 or any later version may be used, or where a choice of which version
* of the LGPL is applied is otherwise unspecified.
*/
#ifdef VBOX
# ifdef VBOX_WITH_VMI
# endif
#endif
#include "exec.h"
//#define DEBUG_PCALL
#if 0
#define raise_exception_err(a, b)\
do {\
if (logfile)\
(raise_exception_err)(a, b);\
} while (0)
#endif
};
/* modulo 17 table */
0, 1, 2, 3, 4, 5, 6, 7,
8, 9,10,11,12,13,14,15,
16, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9,10,11,12,13,14,
};
/* modulo 9 table */
0, 1, 2, 3, 4, 5, 6, 7,
8, 0, 1, 2, 3, 4, 5, 6,
7, 8, 0, 1, 2, 3, 4, 5,
6, 7, 8, 0, 1, 2, 3, 4,
};
{
0.00000000000000000000L,
1.00000000000000000000L,
3.14159265358979323851L, /*pi*/
0.30102999566398119523L, /*lg2*/
0.69314718055994530943L, /*ln2*/
1.44269504088896340739L, /*l2e*/
3.32192809488736234781L, /*l2t*/
};
/* thread support */
/* Acquire the global CPU lock.  NOTE(review): body is empty in this
   chunk -- locking appears compiled out (single-threaded emulation);
   confirm against the non-stripped source. */
void cpu_lock(void)
{
}
/* Release the global CPU lock.  NOTE(review): body is empty in this
   chunk, matching the empty cpu_lock() above -- confirm against the
   non-stripped source. */
void cpu_unlock(void)
{
}
/* Abort execution of the current translated block and return to the
   main CPU loop.  NOTE(review): the canonical implementation ends with
   a longjmp back to the loop's jmp_env; that call is not visible in
   this chunk -- confirm it was not lost. */
void cpu_loop_exit(void)
{
/* NOTE: the registers at this point must be saved by hand because
   longjmp restores them */
regs_to_env();
}
/* return non zero if error */
int selector)
{
int index;
if (selector & 0x4)
else
return -1;
return 0;
}
{
unsigned int limit;
if (e2 & DESC_G_MASK)
return limit;
}
{
}
{
}
/* init the segment cache in vm86 mode. */
{
selector &= 0xffff;
#ifdef VBOX
flags |= DESC_CS_MASK;
#else
#endif
}
{
#if 0
{
int i;
}
printf("\n");
}
#endif
if (shift == 0) {
} else {
}
}
/* XXX: merge with load_seg() */
{
if ((selector & 0xfffc) != 0) {
if (!(e2 & DESC_S_MASK))
if (!(e2 & DESC_CS_MASK))
/* XXX: is it correct ? */
/* SS must be writable data */
} else {
/* not readable code */
/* if data or non conforming code, checks the rights */
}
}
if (!(e2 & DESC_P_MASK))
e2);
} else {
}
}
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
{
int index;
#ifdef DEBUG_PCALL
if (loglevel & CPU_LOG_PCALL)
#endif
#endif
/* if task gate, we read the TSS segment and we load it */
if (type == 5) {
if (!(e2 & DESC_P_MASK))
if (tss_selector & 4)
if (e2 & DESC_S_MASK)
}
if (!(e2 & DESC_P_MASK))
if (type & 8)
tss_limit_max = 103;
else
tss_limit_max = 43;
if ((tss_selector & 4) != 0 ||
if (old_type & 8)
old_tss_limit_max = 103;
else
old_tss_limit_max = 43;
/* read all the registers from the new TSS */
if (type & 8) {
/* 32 bit */
for(i = 0; i < 8; i++)
for(i = 0; i < 6; i++)
} else {
/* 16 bit */
new_cr3 = 0;
for(i = 0; i < 8; i++)
for(i = 0; i < 4; i++)
new_trap = 0;
}
/* NOTE: we must avoid memory exceptions during the task switch,
so we make dummy accesses before */
/* XXX: it can still fail in some cases, so a bigger hack is
necessary to valid the TLB after having done the accesses */
/* clear busy bit (it is restartable) */
e2 &= ~DESC_TSS_BUSY_MASK;
}
old_eflags = compute_eflags();
if (source == SWITCH_TSS_IRET)
old_eflags &= ~NT_MASK;
/* save the current state in the old TSS */
if (type & 8) {
/* 32 bit */
for(i = 0; i < 6; i++)
printf("TSS 32 bits switch\n");
#endif
} else {
/* 16 bit */
for(i = 0; i < 4; i++)
}
/* now if an exception occurs, it will occurs in the next task
context */
if (source == SWITCH_TSS_CALL) {
new_eflags |= NT_MASK;
}
/* set busy bit */
e2 |= DESC_TSS_BUSY_MASK;
}
/* set the new CPU state */
/* from this point, any exception which occurs can give problems */
}
/* load all registers without an exception, then reload them with
possible exception */
if (!(type & 8))
eflags_mask &= 0xffff;
/* XXX: what to do in 16 bit case ? */
if (new_eflags & VM_MASK) {
for(i = 0; i < 6; i++)
load_seg_vm(i, new_segs[i]);
/* in vm86, CPL is always 3 */
} else {
/* CPL is set the RPL of CS */
/* first just selectors as the rest may trigger exceptions */
for(i = 0; i < 6; i++)
}
/* load the LDT */
if (new_ldt & 4)
if ((new_ldt & 0xfffc) != 0) {
if (!(e2 & DESC_P_MASK))
}
/* load the segments */
if (!(new_eflags & VM_MASK)) {
}
/* check that EIP is in the CS segment limits */
/* XXX: different exception if CALL ? */
}
}
/* check if Port I/O is allowed in TSS */
{
/* TSS must be a valid 32 bit one */
goto fail;
/* Note: the check needs two bytes */
goto fail;
/* all bits must be zero to allow the I/O */
fail:
}
}
/* Check byte I/O permission for port T0 against the TSS I/O bitmap.
   NOTE(review): body is empty in this chunk -- the check_io call
   appears stripped; confirm against the full source. */
void check_iob_T0(void)
{
}
/* Check word I/O permission for port T0.  NOTE(review): body is empty
   in this chunk -- appears stripped; confirm against the full source. */
void check_iow_T0(void)
{
}
/* Check dword I/O permission for port T0.  NOTE(review): body is empty
   in this chunk -- appears stripped; confirm against the full source. */
void check_iol_T0(void)
{
}
/* Check byte I/O permission for the port in DX.  NOTE(review): body is
   empty in this chunk -- appears stripped; confirm against the full
   source. */
void check_iob_DX(void)
{
}
/* Check word I/O permission for the port in DX.  NOTE(review): body is
   empty in this chunk -- appears stripped; confirm against the full
   source. */
void check_iow_DX(void)
{
}
/* Check dword I/O permission for the port in DX.  NOTE(review): body is
   empty in this chunk -- appears stripped; confirm against the full
   source. */
void check_iol_DX(void)
{
}
/* Return the stack-pointer mask implied by the second dword of a stack
   segment descriptor: a full 32-bit mask when the B (big) flag is set,
   otherwise a 16-bit mask for 286-style stacks. */
static inline unsigned int get_sp_mask(unsigned int e2)
{
    return (e2 & DESC_B_MASK) ? 0xffffffff : 0xffff;
}
#ifdef TARGET_X86_64
do {\
if ((sp_mask) == 0xffff)\
else if ((sp_mask) == 0xffffffffLL)\
else\
} while (0)
#else
#endif
/* XXX: add a is_user flag to have proper security support */
{\
sp -= 2;\
}
{\
sp -= 4;\
}
{\
sp += 2;\
}
{\
sp += 4;\
}
/* protected mode interrupt */
{
#ifdef VBOX
# ifdef VBOX_WITH_VMI
if ( intno == 6
{
}
# endif
#endif
has_error_code = 0;
switch(intno) {
case 8:
case 10:
case 11:
case 12:
case 13:
case 14:
case 17:
has_error_code = 1;
break;
}
}
if (is_int)
else
/* check gate type */
switch(type) {
case 5: /* task gate */
/* must do that check here to return the correct error code */
if (!(e2 & DESC_P_MASK))
if (has_error_code) {
int type;
/* push the error code */
mask = 0xffffffff;
else
mask = 0xffff;
if (shift)
else
}
return;
case 6: /* 286 interrupt gate */
case 7: /* 286 trap gate */
case 14: /* 386 interrupt gate */
case 15: /* 386 trap gate */
break;
default:
break;
}
/* check privilege if software int */
/* check valid bit */
if (!(e2 & DESC_P_MASK))
if ((selector & 0xfffc) == 0)
if (!(e2 & DESC_P_MASK))
/* to inner privilege */
if ((ss & 0xfffc) == 0)
if (!(ss_e2 & DESC_S_MASK) ||
(ss_e2 & DESC_CS_MASK) ||
!(ss_e2 & DESC_W_MASK))
if (!(ss_e2 & DESC_P_MASK))
#ifdef VBOX /* See page 3-477 of 253666.pdf */
#else
#endif
new_stack = 1;
#endif
/* to same privilege */
new_stack = 0;
} else {
new_stack = 0; /* avoid warning */
sp_mask = 0; /* avoid warning */
ssp = 0; /* avoid warning */
esp = 0; /* avoid warning */
}
#if 0
/* XXX: check that enough room is available */
push_size += 8;
#endif
if (shift == 1) {
if (new_stack) {
}
}
if (has_error_code) {
}
} else {
if (new_stack) {
}
}
if (has_error_code) {
}
}
if (new_stack) {
}
}
e2);
/* interrupt gate clear IF mask */
if ((type & 1) == 0) {
}
}
#ifdef VBOX
/* check if VME interrupt redirection is enabled in TSS */
static inline bool is_vme_irq_redirected(int intno)
{
int io_offset, intredir_offset;
/* TSS must be a valid 32 bit one */
goto fail;
/* the virtual interrupt redirection bitmap is located below the io bitmap */
goto fail;
/* bit set means no redirection. */
return false;
}
return true;
fail:
return true;
}
/* V86 mode software interrupt with CR4.VME=1 */
{
int selector;
if (!is_vme_irq_redirected(intno))
{
if (iopl == 3)
/* normal protected mode handler call */
else
}
/* virtual mode idt is at linear address 0 */
old_eflags = compute_eflags();
if (iopl < 3)
{
/* copy VIF into IF and set IOPL to 3 */
old_eflags |= IF_MASK;
else
old_eflags &= ~IF_MASK;
}
/* XXX: use SS segment size ? */
/* update processor state */
if (iopl < 3)
else
}
#endif /* VBOX */
#ifdef TARGET_X86_64
{\
sp -= 8;\
}
{\
sp += 8;\
}
{
int index;
#if 0
#endif
}
/* 64 bit interrupt */
{
int has_error_code, new_stack;
#ifdef VBOX
#endif
has_error_code = 0;
switch(intno) {
case 8:
case 10:
case 11:
case 12:
case 13:
case 14:
case 17:
has_error_code = 1;
break;
}
}
if (is_int)
else
/* check gate type */
switch(type) {
case 14: /* 386 interrupt gate */
case 15: /* 386 trap gate */
break;
default:
break;
}
/* check privilege if software int */
/* check valid bit */
if (!(e2 & DESC_P_MASK))
if ((selector & 0xfffc) == 0)
if (!(e2 & DESC_P_MASK))
/* to inner privilege */
if (ist != 0)
else
ss = 0;
new_stack = 1;
/* to same privilege */
new_stack = 0;
if (ist != 0)
else
} else {
new_stack = 0; /* avoid warning */
esp = 0; /* avoid warning */
}
if (has_error_code) {
}
if (new_stack) {
}
e2);
/* interrupt gate clear IF mask */
if ((type & 1) == 0) {
}
}
#endif
void helper_syscall(int next_eip_addend)
{
int selector;
}
#ifdef TARGET_X86_64
int code64;
cpu_x86_set_cpl(env, 0);
0, 0xffffffff,
0, 0xffffffff,
if (code64)
else
} else
#endif
{
cpu_x86_set_cpl(env, 0);
0, 0xffffffff,
0, 0xffffffff,
}
}
void helper_sysret(int dflag)
{
}
}
#ifdef TARGET_X86_64
if (dflag == 2) {
0, 0xffffffff,
} else {
0, 0xffffffff,
}
0, 0xffffffff,
} else
#endif
{
0, 0xffffffff,
0, 0xffffffff,
}
#ifdef USE_KQEMU
if (kqemu_is_ok(env)) {
}
#endif
}
#ifdef VBOX
/**
* Checks and processes external VMM events.
* Called by op_check_external_event() when any of the flags is set and can be serviced.
*/
void helper_external_event(void)
{
#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
#endif
{
}
{
}
{
}
{
}
}
/* helper for recording call instruction addresses for later scanning */
/* Record a call instruction address for later scanning (VBox patch
   manager support).  Fixed the K&R-style empty parameter list: "()"
   declares an unspecified-arguments function in C; "(void)" gives a
   proper prototype.  NOTE(review): body is empty in this chunk --
   the recording code appears stripped; confirm against the full
   source. */
void helper_record_call(void)
{
}
#endif /* VBOX */
/* real mode interrupt */
unsigned int next_eip)
{
int selector;
/* real mode (simpler !) */
if (is_int)
else
/* XXX: use SS segment size ? */
/* update processor state */
}
/* fake user mode interrupt */
{
/* check privilege if software int */
/* Since we emulate only user space, we cannot do more than
exiting the emulation with the suitable exception and error
code */
if (is_int)
}
/*
* Begin execution of an interruption. is_int is TRUE if coming from
* the int instruction. next_eip is the EIP value AFTER the interrupt
* instruction. It is only relevant if is_int is TRUE.
*/
{
if (loglevel & CPU_LOG_INT) {
static int count;
fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
if (intno == 0x0e) {
} else {
}
#if 0
{
int i;
for(i = 0; i < 16; i++) {
}
}
#endif
count++;
}
}
#ifdef TARGET_X86_64
} else
#endif
{
#ifdef VBOX
/* int xx *, v86 code and VME enabled? */
&& is_int
&& !is_hw
)
else
#endif /* VBOX */
}
} else {
}
}
/*
* Signal an interruption. It is executed in the main CPU loop.
* is_int is TRUE if coming from the int instruction. next_eip is the
* EIP value AFTER the interrupt instruction. It is only relevant if
* is_int is TRUE.
*/
int next_eip_addend)
{
NOT_DMIK(Log2(("raise_interrupt: %x %x %x %VGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
#endif
}
/* same as raise_exception_err, but do not restore global registers */
{
env->exception_is_int = 0;
env->exception_next_eip = 0;
}
/* shortcuts to generate exceptions */
{
}
/* Raise a CPU exception with no error code.  Delegates to
   raise_interrupt() with is_int=0, error_code=0 and no EIP adjustment;
   does not return to the caller. */
void raise_exception(int exception_index)
{
raise_interrupt(exception_index, 0, 0, 0);
}
/* SMM support */
#if defined(CONFIG_USER_ONLY)
void do_smm_enter(void)
{
}
void helper_rsm(void)
{
}
#else
#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
void do_smm_enter(void)
{
#ifdef VBOX
#else /* !VBOX */
int i, offset;
if (loglevel & CPU_LOG_INT) {
}
#ifdef TARGET_X86_64
for(i = 0; i < 6; i++) {
}
for(i = 8; i < 16; i++)
#else
for(i = 0; i < 6; i++) {
if (i < 3)
else
}
#endif
/* init SMM cpu state */
#ifdef TARGET_X86_64
#endif
0xffffffff, 0);
cpu_x86_update_cr4(env, 0);
#endif /* VBOX */
}
void helper_rsm(void)
{
#ifdef VBOX
#else /* !VBOX */
int i, offset;
#ifdef TARGET_X86_64
else
for(i = 0; i < 6; i++) {
}
for(i = 8; i < 16; i++)
if (val & 0x20000) {
}
#else
for(i = 0; i < 6; i++) {
if (i < 3)
else
}
if (val & 0x20000) {
}
#endif
if (loglevel & CPU_LOG_INT) {
}
#endif /* !VBOX */
}
#endif /* !CONFIG_USER_ONLY */
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
call it from another function */
{
}
{
}
#endif
void helper_divl_EAX_T0(void)
{
unsigned int den, r;
if (den == 0) {
}
#ifdef BUGGY_GCC_DIV64
#else
#endif
if (q > 0xffffffff)
}
void helper_idivl_EAX_T0(void)
{
int den, r;
if (den == 0) {
}
#ifdef BUGGY_GCC_DIV64
#else
#endif
if (q != (int32_t)q)
}
void helper_cmpxchg8b(void)
{
uint64_t d;
int eflags;
} else {
/* always do the store */
}
}
/* Handle a single-step (TF) trap after executing one instruction.
   Fixed the K&R-style empty parameter list: "()" declares an
   unspecified-arguments function in C; "(void)" gives a proper
   prototype.  NOTE(review): body is empty in this chunk -- the
   debug-exception code appears stripped; confirm against the full
   source. */
void helper_single_step(void)
{
}
void helper_cpuid(void)
{
#ifndef VBOX
/* test if maximum index reached */
if (index & 0x80000000) {
} else {
}
switch(index) {
case 0:
break;
case 1:
break;
case 2:
/* cache info: needed for Pentium Pro compatibility */
EAX = 0x410601;
EBX = 0;
ECX = 0;
EDX = 0;
break;
case 0x80000000:
break;
case 0x80000001:
EBX = 0;
ECX = 0;
break;
case 0x80000002:
case 0x80000003:
case 0x80000004:
break;
case 0x80000005:
/* cache info (L1 cache) */
EAX = 0x01ff01ff;
EBX = 0x01ff01ff;
ECX = 0x40020140;
EDX = 0x40020140;
break;
case 0x80000006:
/* cache info (L2 cache) */
EAX = 0;
EBX = 0x42004200;
ECX = 0x02008140;
EDX = 0;
break;
case 0x80000008:
/* virtual & phys address size in low 2 bytes. */
EAX = 0x00003028;
EBX = 0;
ECX = 0;
EDX = 0;
break;
default:
/* reserved values: zero */
EAX = 0;
EBX = 0;
ECX = 0;
EDX = 0;
break;
}
#else /* VBOX */
#endif /* VBOX */
}
{
if (data32) {
/* 32 bit */
esp -= 4;
while (--level) {
esp -= 4;
ebp -= 4;
}
esp -= 4;
} else {
/* 16 bit */
esp -= 2;
while (--level) {
esp -= 2;
ebp -= 2;
}
esp -= 2;
}
}
#ifdef TARGET_X86_64
{
if (data64) {
/* 64 bit */
esp -= 8;
while (--level) {
esp -= 8;
ebp -= 8;
}
esp -= 8;
} else {
/* 16 bit */
esp -= 2;
while (--level) {
esp -= 2;
ebp -= 2;
}
esp -= 2;
}
}
#endif
void helper_lldt_T0(void)
{
int selector;
int index, entry_limit;
#ifdef VBOX
Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%VGv, .limit=%VGv} new=%RTsel\n",
#endif
if ((selector & 0xfffc) == 0) {
/* XXX: NULL selector case: invalid LDT */
} else {
if (selector & 0x4)
#ifdef TARGET_X86_64
entry_limit = 15;
else
#endif
entry_limit = 7;
if (!(e2 & DESC_P_MASK))
#ifdef TARGET_X86_64
} else
#endif
{
}
}
#ifdef VBOX
Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%VGv, .limit=%VGv}\n",
#endif
}
void helper_ltr_T0(void)
{
int selector;
#ifdef VBOX
Log(("helper_ltr_T0: old tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n",
#endif
if ((selector & 0xfffc) == 0) {
/* NULL selector case: invalid TR */
} else {
if (selector & 0x4)
#ifdef TARGET_X86_64
entry_limit = 15;
else
#endif
entry_limit = 7;
if ((e2 & DESC_S_MASK) ||
if (!(e2 & DESC_P_MASK))
#ifdef TARGET_X86_64
} else
#endif
{
}
e2 |= DESC_TSS_BUSY_MASK;
}
#ifdef VBOX
Log(("helper_ltr_T0: new tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n",
#endif
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
{
int index;
selector &= 0xffff;
#ifdef VBOX
/* Trying to load a selector with CPL=1? */
{
}
#endif
if ((selector & 0xfffc) == 0) {
/* null selector case */
#ifdef TARGET_X86_64
#endif
)
} else {
if (selector & 0x4)
else
if (!(e2 & DESC_S_MASK))
/* must be writable segment */
} else {
/* must be readable segment */
/* if not conforming code, test rights */
}
}
if (!(e2 & DESC_P_MASK)) {
else
}
/* set the access bit if not already set */
if (!(e2 & DESC_A_MASK)) {
e2 |= DESC_A_MASK;
}
e2);
#if 0
#endif
}
}
/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip_addend)
{
if ((new_cs & 0xfffc) == 0)
if (e2 & DESC_S_MASK) {
if (!(e2 & DESC_CS_MASK))
if (e2 & DESC_C_MASK) {
/* conforming code segment */
} else {
/* non conforming code segment */
}
if (!(e2 & DESC_P_MASK))
} else {
/* jump to call or task gate */
switch(type) {
case 1: /* 286 TSS */
case 9: /* 386 TSS */
case 5: /* task gate */
break;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
if (!(e2 & DESC_P_MASK))
if (type == 12)
/* must be code segment */
(DESC_S_MASK | DESC_CS_MASK)))
if (!(e2 & DESC_P_MASK))
#ifdef VBOX /* See page 3-514 of 253666.pdf */
#else
#endif
break;
default:
break;
}
}
}
/* real mode call */
{
if (shift) {
} else {
}
}
/* protected mode call */
{
#ifdef DEBUG_PCALL
if (loglevel & CPU_LOG_PCALL) {
}
#endif
if ((new_cs & 0xfffc) == 0)
#ifdef DEBUG_PCALL
if (loglevel & CPU_LOG_PCALL) {
}
#endif
if (e2 & DESC_S_MASK) {
if (!(e2 & DESC_CS_MASK))
if (e2 & DESC_C_MASK) {
/* conforming code segment */
} else {
/* non conforming code segment */
}
if (!(e2 & DESC_P_MASK))
#ifdef TARGET_X86_64
/* XXX: check 16/32 bit cases in long mode */
if (shift == 2) {
/* 64 bit case */
/* from this point, not restartable */
} else
#endif
{
if (shift) {
} else {
}
/* from this point, not restartable */
}
} else {
/* check gate type */
switch(type) {
case 1: /* available 286 TSS */
case 9: /* available 386 TSS */
case 5: /* task gate */
return;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
break;
default:
break;
}
/* check valid bit */
if (!(e2 & DESC_P_MASK))
if ((selector & 0xfffc) == 0)
if (!(e2 & DESC_P_MASK))
/* to inner privilege */
#ifdef DEBUG_PCALL
if (loglevel & CPU_LOG_PCALL)
#endif
if ((ss & 0xfffc) == 0)
if (!(ss_e2 & DESC_S_MASK) ||
(ss_e2 & DESC_CS_MASK) ||
!(ss_e2 & DESC_W_MASK))
if (!(ss_e2 & DESC_P_MASK))
#ifdef VBOX /* See page 3-99 of 253666.pdf */
#else
#endif
// push_size = ((param_count * 2) + 8) << shift;
if (shift) {
for(i = param_count - 1; i >= 0; i--) {
}
} else {
for(i = param_count - 1; i >= 0; i--) {
}
}
new_stack = 1;
} else {
/* to same privilege */
// push_size = (4 << shift);
new_stack = 0;
}
if (shift) {
} else {
}
/* from this point, not restartable */
if (new_stack) {
ssp,
ss_e2);
}
e2);
}
#ifdef USE_KQEMU
if (kqemu_is_ok(env)) {
}
#endif
}
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
int eflags_mask;
#ifdef VBOX
bool fVME = false;
#endif /* VBOX */
if (shift == 1) {
/* 32 bits */
new_cs &= 0xffff;
} else {
/* 16 bits */
}
#ifdef VBOX
{
fVME = true;
/* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
/* if TF will be set -> #GP */
|| (new_eflags & TF_MASK))
}
#endif /* VBOX */
#ifdef VBOX
if (fVME)
else
#endif
else
if (shift == 0)
eflags_mask &= 0xffff;
#ifdef VBOX
if (fVME)
{
if (new_eflags & IF_MASK)
else
}
#endif /* VBOX */
}
{
int dpl;
/* XXX: on x86_64, we do not want to nullify FS and GS because
they may still contain a valid base. I would be interested to
know how a real x86_64 CPU behaves */
return;
/* data or non conforming code segment */
}
}
}
/* protected mode iret */
{
#ifdef TARGET_X86_64
if (shift == 2)
sp_mask = -1;
else
#endif
new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
if (shift == 2) {
new_cs &= 0xffff;
if (is_iret) {
}
} else
#endif
if (shift == 1) {
/* 32 bits */
new_cs &= 0xffff;
if (is_iret) {
#endif
if (new_eflags & VM_MASK)
goto return_to_vm86;
}
#ifdef VBOX
{
#ifdef DEBUG
#endif
}
#endif
} else {
/* 16 bits */
if (is_iret)
}
#ifdef DEBUG_PCALL
if (loglevel & CPU_LOG_PCALL) {
}
#endif
if ((new_cs & 0xfffc) == 0)
{
printf("new_cs & 0xfffc) == 0\n");
#endif
}
{
printf("load_segment failed\n");
#endif
}
if (!(e2 & DESC_S_MASK) ||
!(e2 & DESC_CS_MASK))
{
#endif
}
{
#endif
}
if (e2 & DESC_C_MASK) {
{
#endif
}
} else {
{
#endif
}
}
if (!(e2 & DESC_P_MASK))
{
#endif
}
/* return to same privilege level */
e2);
} else {
/* return to different privilege level */
#ifdef TARGET_X86_64
if (shift == 2) {
new_ss &= 0xffff;
} else
#endif
if (shift == 1) {
/* 32 bits */
new_ss &= 0xffff;
} else {
/* 16 bits */
}
#ifdef DEBUG_PCALL
if (loglevel & CPU_LOG_PCALL) {
}
#endif
if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
/* NULL ss is allowed in long mode if cpl != 3*/
/* XXX: test CS64 ? */
0, 0xffffffff,
} else
#endif
{
}
} else {
if (!(ss_e2 & DESC_S_MASK) ||
(ss_e2 & DESC_CS_MASK) ||
!(ss_e2 & DESC_W_MASK))
if (!(ss_e2 & DESC_P_MASK))
ss_e2);
}
e2);
#ifdef TARGET_X86_64
sp_mask = -1;
else
#endif
/* validate data segments */
}
if (is_iret) {
/* NOTE: 'cpl' is the _old_ CPL */
if (cpl == 0)
#ifdef VBOX
#else
eflags_mask |= IOPL_MASK;
#endif
eflags_mask |= IF_MASK;
if (shift == 0)
eflags_mask &= 0xffff;
}
return;
#if 0 // defined(VBOX) && defined(DEBUG)
#endif
/* modify processor state */
}
{
int tss_selector, type;
#ifdef VBOX
#endif
/* specific case for TSS */
#ifdef TARGET_X86_64
#endif
if (tss_selector & 4)
/* NOTE: we check both segment and busy TSS */
if (type != 3)
} else {
}
#ifdef USE_KQEMU
if (kqemu_is_ok(env)) {
}
#endif
}
{
#ifdef USE_KQEMU
if (kqemu_is_ok(env)) {
}
#endif
}
void helper_sysenter(void)
{
if (env->sysenter_cs == 0) {
}
cpu_x86_set_cpl(env, 0);
0, 0xffffffff,
0, 0xffffffff,
}
void helper_sysexit(void)
{
int cpl;
}
0, 0xffffffff,
0, 0xffffffff,
#ifdef USE_KQEMU
if (kqemu_is_ok(env)) {
}
#endif
}
void helper_movl_crN_T0(int reg)
{
#if !defined(CONFIG_USER_ONLY)
switch(reg) {
case 0:
break;
case 3:
break;
case 4:
break;
case 8:
break;
default:
break;
}
#endif
}
/* XXX: do more */
/* Move T0 into debug register DRn.  NOTE(review): body is empty in
   this chunk -- the env->dr[reg] store appears stripped; confirm
   against the full source. */
void helper_movl_drN_T0(int reg)
{
}
{
}
void helper_rdtsc(void)
{
}
}
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}
void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
case MSR_IA32_SYSENTER_CS:
break;
case MSR_IA32_SYSENTER_ESP:
break;
case MSR_IA32_SYSENTER_EIP:
break;
case MSR_IA32_APICBASE:
break;
case MSR_EFER:
{
update_mask = 0;
(val & update_mask);
}
break;
case MSR_STAR:
break;
case MSR_PAT:
break;
#ifdef TARGET_X86_64
case MSR_LSTAR:
break;
case MSR_CSTAR:
break;
case MSR_FMASK:
break;
case MSR_FSBASE:
break;
case MSR_GSBASE:
break;
case MSR_KERNELGSBASE:
break;
#endif
default:
#ifndef VBOX
/* XXX: exception ? */
break;
#else /* VBOX */
{
/* In X2APIC specification this range is reserved for APIC control. */
/** @todo else exception? */
break;
}
#endif /* VBOX */
}
}
void helper_rdmsr(void)
{
case MSR_IA32_SYSENTER_CS:
break;
case MSR_IA32_SYSENTER_ESP:
break;
case MSR_IA32_SYSENTER_EIP:
break;
case MSR_IA32_APICBASE:
break;
case MSR_EFER:
break;
case MSR_STAR:
break;
case MSR_PAT:
break;
#ifdef TARGET_X86_64
case MSR_LSTAR:
break;
case MSR_CSTAR:
break;
case MSR_FMASK:
break;
case MSR_FSBASE:
break;
case MSR_GSBASE:
break;
case MSR_KERNELGSBASE:
break;
#endif
default:
#ifndef VBOX
/* XXX: exception ? */
val = 0;
break;
#else /* VBOX */
{
/* In X2APIC specification this range is reserved for APIC control. */
else
val = 0; /** @todo else exception? */
break;
}
#endif /* VBOX */
}
}
#endif
void helper_lsl(void)
{
goto fail;
if (e2 & DESC_S_MASK) {
/* conforming */
} else {
goto fail;
}
} else {
switch(type) {
case 1:
case 2:
case 3:
case 9:
case 11:
break;
default:
goto fail;
}
fail:
return;
}
}
}
void helper_lar(void)
{
unsigned int selector;
if ((selector & 0xfffc) == 0)
goto fail;
goto fail;
if (e2 & DESC_S_MASK) {
/* conforming */
} else {
goto fail;
}
} else {
switch(type) {
case 1:
case 2:
case 3:
case 4:
case 5:
case 9:
case 11:
case 12:
break;
default:
goto fail;
}
fail:
return;
}
}
}
void helper_verr(void)
{
unsigned int selector;
if ((selector & 0xfffc) == 0)
goto fail;
goto fail;
if (!(e2 & DESC_S_MASK))
goto fail;
if (e2 & DESC_CS_MASK) {
if (!(e2 & DESC_R_MASK))
goto fail;
if (!(e2 & DESC_C_MASK)) {
goto fail;
}
} else {
fail:
return;
}
}
}
void helper_verw(void)
{
unsigned int selector;
if ((selector & 0xfffc) == 0)
goto fail;
goto fail;
if (!(e2 & DESC_S_MASK))
goto fail;
if (e2 & DESC_CS_MASK) {
goto fail;
} else {
goto fail;
if (!(e2 & DESC_W_MASK)) {
fail:
return;
}
}
}
/* FPU helpers */
/* Load an 80-bit extended real from the address in A0 onto the FPU
   stack as the new ST0.  NOTE(review): only the local declaration
   survives in this chunk -- the load/push code appears stripped;
   confirm against the full source. */
void helper_fldt_ST0_A0(void)
{
int new_fpstt;
}
/* Store ST0 as an 80-bit extended real to the address in A0.
   NOTE(review): body is empty in this chunk -- appears stripped;
   confirm against the full source. */
void helper_fstt_ST0_A0(void)
{
}
/* Set the given exception flag bits in the FPU status word.
   NOTE(review): body is empty in this chunk -- the fpus update appears
   stripped; confirm against the full source. */
void fpu_set_exception(int mask)
{
}
{
if (b == 0.0)
return a / b;
}
void fpu_raise_exception(void)
{
}
#if !defined(CONFIG_USER_ONLY)
else {
}
#endif
}
/* BCD ops */
void helper_fbld_ST0_A0(void)
{
unsigned int v;
int i;
val = 0;
for(i = 8; i >= 0; i--) {
}
fpush();
}
void helper_fbst_ST0_A0(void)
{
int v;
if (val < 0) {
} else {
}
if (val == 0)
break;
v = val % 100;
v = ((v / 10) << 4) | (v % 10);
}
}
}
void helper_f2xm1(void)
{
}
void helper_fyl2x(void)
{
if (fptemp>0.0){
fpop();
} else {
}
}
void helper_fptan(void)
{
} else {
fpush();
ST0 = 1.0;
/* the above code is for |arg| < 2**52 only */
}
}
void helper_fpatan(void)
{
fpop();
}
void helper_fxtract(void)
{
unsigned int expdif;
/*DP exponent bias*/
fpush();
}
void helper_fprem1(void)
{
int expdif;
int q;
if (expdif < 53) {
q = (int)dblq; /* cutting off top bits is assumed here */
/* (C0,C1,C3) <-- (q2,q1,q0) */
} else {
/* fpsrcop = integer obtained by rounding to the nearest */
}
}
void helper_fprem(void)
{
int expdif;
int q;
if ( expdif < 53 ) {
q = (int)dblq; /* cutting off top bits is assumed here */
/* (C0,C1,C3) <-- (q2,q1,q0) */
} else {
/* fpsrcop = integer obtained by chopping */
}
}
void helper_fyl2xp1(void)
{
fpop();
} else {
}
}
void helper_fsqrt(void)
{
if (fptemp<0.0) {
}
}
void helper_fsincos(void)
{
} else {
fpush();
/* the above code is for |arg| < 2**63 only */
}
}
void helper_frndint(void)
{
}
void helper_fscale(void)
{
}
void helper_fsin(void)
{
} else {
/* the above code is for |arg| < 2**53 only */
}
}
void helper_fcos(void)
{
} else {
/* the above code is for |arg| < 2**63 only */
}
}
void helper_fxam_ST0(void)
{
int expdif;
/* XXX: test fptags too */
#ifdef USE_X86LDOUBLE
#else
#endif
else
} else if (expdif == 0) {
else
} else {
}
}
{
fptag = 0;
for (i=7; i>=0; i--) {
fptag <<= 2;
fptag |= 3;
} else {
/* zero */
fptag |= 1;
#ifdef USE_X86LDOUBLE
#endif
) {
/* NaNs, infinity, denormal */
fptag |= 2;
}
}
}
if (data32) {
/* 32 bit */
} else {
/* 16 bit */
}
}
{
if (data32) {
}
else {
}
for(i = 0;i < 8; i++) {
fptag >>= 2;
}
}
{
int i;
for(i = 0;i < 8; i++) {
ptr += 10;
}
/* fninit */
}
{
int i;
for(i = 0;i < 8; i++) {
ptr += 10;
}
}
{
fptag = 0;
for(i = 0; i < 8; i++) {
}
for(i = 0;i < 8; i++) {
addr += 16;
}
/* XXX: finish it */
for(i = 0; i < nb_xmm_regs; i++) {
addr += 16;
}
}
}
{
fptag ^= 0xff;
for(i = 0;i < 8; i++) {
}
for(i = 0;i < 8; i++) {
addr += 16;
}
/* XXX: finish it */
//ldl(ptr + 0x1c);
for(i = 0; i < nb_xmm_regs; i++) {
#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
# if 1
# else
/* this works fine on Mac OS X, gcc 4.0.1 */
# endif
#endif
addr += 16;
}
}
}
#ifndef USE_X86LDOUBLE
{
int e;
temp.d = f;
/* mantissa */
/* exponent + sign */
*pexp = e;
}
{
int e;
/* XXX: handle overflow ? */
#ifdef __arm__
#else
#endif
return temp.d;
}
#else
{
temp.d = f;
}
{
return temp.d;
}
#endif
#ifdef TARGET_X86_64
//#define DEBUG_MULDIV
{
*plow += a;
/* carry test */
if (*plow < a)
(*phigh)++;
*phigh += b;
}
{
}
{
uint64_t v;
a0 = a;
a1 = a >> 32;
b0 = b;
b1 = b >> 32;
*plow = v;
*phigh = 0;
*phigh += v;
#ifdef DEBUG_MULDIV
#endif
}
{
sa = (a < 0);
if (sa)
a = -a;
sb = (b < 0);
if (sb)
b = -b;
}
}
/* return TRUE if overflow */
{
if (a1 == 0) {
q = a0 / b;
r = a0 % b;
*plow = q;
*phigh = r;
} else {
if (a1 >= b)
return 1;
/* XXX: use a better algorithm */
for(i = 0; i < 64; i++) {
a1 -= b;
qb = 1;
} else {
qb = 0;
}
}
#if defined(DEBUG_MULDIV)
printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
#endif
}
return 0;
}
/* return TRUE if overflow */
{
if (sa)
sb = (b < 0);
if (sb)
b = -b;
return 1;
return 1;
} else {
return 1;
}
if (sa)
return 0;
}
void helper_mulq_EAX_T0(void)
{
}
void helper_imulq_EAX_T0(void)
{
}
void helper_imulq_T0_T1(void)
{
}
void helper_divq_EAX_T0(void)
{
if (T0 == 0) {
}
}
void helper_idivq_EAX_T0(void)
{
if (T0 == 0) {
}
}
void helper_bswapq_T0(void)
{
}
#endif
/* Execute HLT: halt the virtual CPU until the next interrupt.
   NOTE(review): body is empty in this chunk -- the halted-state /
   EXCP_HLT code appears stripped; confirm against the full source. */
void helper_hlt(void)
{
}
/* Execute MONITOR.  Treated as a no-op: the monitored address is not
   recorded (see XXX below).  NOTE(review): any validity checks that the
   full source performs are not visible in this chunk. */
void helper_monitor(void)
{
/* XXX: store address ? */
}
void helper_mwait(void)
{
#ifdef VBOX
helper_hlt();
#else
/* XXX: not complete but not completely erroneous */
/* more than one CPU: do not sleep because another CPU may
wake this one */
} else {
helper_hlt();
}
#endif
}
float approx_rsqrt(float a)
{
return 1.0 / sqrt(a);
}
/* Approximate reciprocal (used by the SSE rcp helpers): computed in
   double precision, implicitly converted to float on return. */
float approx_rcp(float a)
{
    double inverse = 1.0 / a;
    return inverse;
}
void update_fp_status(void)
{
int rnd_type;
/* set rounding mode */
default:
case RC_NEAR:
break;
case RC_DOWN:
break;
case RC_UP:
break;
case RC_CHOP:
break;
}
#ifdef FLOATX80
case 0:
rnd_type = 32;
break;
case 2:
rnd_type = 64;
break;
case 3:
default:
rnd_type = 80;
break;
}
#endif
}
#if !defined(CONFIG_USER_ONLY)
#define GETPC() (__builtin_return_address(0))
#define SHIFT 0
#include "softmmu_template.h"
#define SHIFT 1
#include "softmmu_template.h"
#define SHIFT 2
#include "softmmu_template.h"
#define SHIFT 3
#include "softmmu_template.h"
#endif
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
{
int ret;
unsigned long pc;
/* XXX: hack to restore env in all cases, even if not called from
generated code */
if (ret) {
if (retaddr) {
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
}
if (retaddr)
else
}
}
#ifdef VBOX
/**
* Correctly computes the eflags.
* @returns eflags.
* @param env1 CPU environment.
*/
{
return efl;
}
/**
* Reads a byte from a virtual address in the guest memory area.
* XXX: is it working for any addresses? swapped out pages?
* @returns the data byte read.
* @param env1 CPU environment.
* @param pvAddr GC Virtual address.
*/
{
return u8;
}
/**
* Reads a 16-bit word from a virtual address in the guest memory area.
* XXX: is it working for any addresses? swapped out pages?
* @returns the data word read.
* @param env1 CPU environment.
* @param pvAddr GC Virtual address.
*/
{
return u16;
}
/**
* Reads a 32-bit dword from a virtual address in the guest memory area.
* XXX: is it working for any addresses? swapped out pages?
* @returns the data dword read.
* @param env1 CPU environment.
* @param pvAddr GC Virtual address.
*/
{
return u32;
}
/**
* Writes byte to virtual address in guest memory area.
* XXX: is it working for any addresses? swapped out pages?
* @returns readed data byte.
* @param env1 CPU environment.
* @param pvAddr GC Virtual address.
* @param val byte value
*/
{
}
{
}
{
}
/**
* Correctly loads selector into segment register with updating internal
* @param env1 CPU environment.
* @param seg_reg Segment register.
* @param selector Selector to load.
*/
{
{
/* Successful sync. */
}
else
{
{
{
e2);
}
else
/* Successful sync. */
}
else
{
/* Postpone sync until the guest uses the selector. */
env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
}
}
}
/**
* Correctly loads a new ldtr selector.
*
* @param env1 CPU environment.
* @param selector Selector to load.
*/
{
{
}
else
{
#ifdef VBOX_STRICT
#endif
}
}
/**
* Correctly loads a new tr selector.
*
* @param env1 CPU environment.
* @param selector Selector to load.
*/
{
/* ARG! this was going to call helper_ltr_T0 but that won't work because of busy flag. */
selector &= 0xffff;
if ((selector & 0xfffc) == 0) {
/* NULL selector case: invalid TR */
} else {
if (selector & 0x4)
goto l_failure;
#ifdef TARGET_X86_64
entry_limit = 15;
else
#endif
entry_limit = 7;
goto l_failure;
(type != 1 && type != 9)*/)
goto l_failure;
if (!(e2 & DESC_P_MASK))
goto l_failure;
#ifdef TARGET_X86_64
} else
#endif
{
}
e2 |= DESC_TSS_BUSY_MASK;
}
return 0;
return -1;
}
{
#if 1 /* single stepping is broken when using a static tb... feel free to figure out why. :-) */
/* This has to be static because it needs to be addressible
using 32-bit immediate addresses on 64-bit machines. This
is dictated by the gcc code model used when building this
module / op.o. Using a static here pushes the problem
onto the module loader. */
static TranslationBlock tb_temp;
#endif
int csize;
void (*gen_func)(void);
/* ensures env is loaded in ebp! */
#if 1 /* see above */
#else
#endif
/*
* Setup temporary translation block.
*/
/* tb_alloc: */
#if 1 /* see above */
#else
if (!tb)
{
}
#endif
/* tb_find_slow: */
/* Initialize the rest with sensible values. */
/*
* Translate only one instruction.
*/
{
AssertFailed();
return -1;
}
#ifdef DEBUG
{
AssertFailed();
return -1;
}
{
AssertFailed();
return -1;
}
#endif
/* tb_link_phys: */
/*
* Execute it using emulation
*/
// eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code
// perhaps not a very safe hack
{
gen_func();
/*
* Exit once we detect an external interrupt and interrupts are enabled
*/
{
break;
}
}
/*
Assert(tb->tb_next_offset[0] == 0xffff);
Assert(tb->tb_next_offset[1] == 0xffff);
Assert(tb->tb_next[0] == 0xffff);
Assert(tb->tb_next[1] == 0xffff);
Assert(tb->jmp_next[0] == NULL);
Assert(tb->jmp_next[1] == NULL);
Assert(tb->jmp_first == NULL); */
/*
* Execute the next instruction when we encounter instruction fusing.
*/
{
Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %VGv\n", env->eip));
}
return 0;
}
{
{
return 0;
}
//raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
if (shift == 0) {
} else {
}
return 1;
}
//*****************************************************************************
// Needs to be at the bottom of the file (overriding macros)
{
return *(CPU86_LDouble *)ptr;
}
{
*(CPU86_LDouble *)ptr = f;
}
#define data64 0
//*****************************************************************************
{
{
fptag = 0;
for(i = 0; i < 8; i++) {
}
for(i = 0;i < 8; i++) {
addr += 16;
}
/* XXX: finish it */
for(i = 0; i < nb_xmm_regs; i++) {
#if __GNUC__ < 4
#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
#endif
addr += 16;
}
}
}
else
{
int fptag;
fptag = 0;
for (i=7; i>=0; i--) {
fptag <<= 2;
fptag |= 3;
} else {
/* the FPU automatically computes it */
}
}
for(i = 0;i < 8; i++) {
}
}
}
//*****************************************************************************
//*****************************************************************************
{
{
fptag ^= 0xff;
for(i = 0;i < 8; i++) {
}
for(i = 0;i < 8; i++) {
addr += 16;
}
/* XXX: finish it, endianness */
//ldl(ptr + 0x1c);
for(i = 0; i < nb_xmm_regs; i++) {
#if HC_ARCH_BITS == 32
/* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
#else
#endif
addr += 16;
}
}
}
else
{
int fptag, j;
for(i = 0;i < 8; i++) {
fptag >>= 2;
}
for(i = 0;i < 8; i++) {
}
}
}
//*****************************************************************************
//*****************************************************************************
#endif /* VBOX */