/* cpu-exec.c revision cca8c8c55206ccd60f1b32843a67ce737447ac60 */
/*
* i386 emulator main execution loop
*
* Copyright (c) 2003-2005 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
* other than GPL or LGPL is available it will apply instead, Sun elects to use only
* the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
* a choice of LGPL license versions is made available with the language indicating
* that LGPLv2 or any later version may be used, or where a choice of which version
* of the LGPL is applied is otherwise unspecified.
*/
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(CONFIG_SOFTMMU)
#include <signal.h>
#include <sys/ucontext.h>
#endif
int tb_invalidated_flag;
//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
/* XXX: unify with i386 target */
/* Exit the current CPU execution loop.
 * NOTE(review): the body is empty in this truncated listing -- in the
 * full revision this longjmps back to the setjmp context established in
 * cpu_exec(); confirm against the complete source before relying on it. */
void cpu_loop_exit(void)
{
}
#endif
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
restored in a state compatible with the CPU emulator
*/
{
#if !defined(CONFIG_SOFTMMU)
#endif
/* XXX: restore cpu registers saved in host registers */
#if !defined(CONFIG_SOFTMMU)
if (puc) {
/* XXX: use siglongjmp ? */
}
#endif
}
unsigned int flags)
{
int code_gen_size;
unsigned int h;
tb_invalidated_flag = 0;
regs_to_env(); /* XXX: do it just before cpu_gen_code() */
/* find translated block using physical mappings */
phys_page2 = -1;
h = tb_phys_hash_func(phys_pc);
ptb1 = &tb_phys_hash[h];
for(;;) {
if (!tb)
goto not_found;
/* check next page if needed */
goto found;
} else {
goto found;
}
}
}
/* if no translated code available, then translate it now */
if (!tb) {
/* flush must be done */
/* cannot fail at this point */
/* don't forget to invalidate previous TB info */
tb_invalidated_flag = 1;
}
code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
/* check next page if needed */
phys_page2 = -1;
}
/* we add the TB in the virtual pc hash table */
return tb;
}
/* Fast-path lookup of the TranslationBlock for the current CPU state.
 * NOTE(review): this listing is heavily truncated -- the declarations of
 * tb, pc and cs_base, the hash-table probe, and the slow-path fallback
 * call are missing from this view.  The comments below describe only
 * what the visible lines establish; verify the rest against the full
 * revision.  The per-target #if ladder assembles the (pc, cs_base,
 * flags) key used to identify a block; for most targets cs_base is
 * simply zero. */
static inline TranslationBlock *tb_find_fast(void)
{
unsigned int flags;
/* we record a subset of the CPU state. It will
always be the same before a given translated block
is executed. */
#if defined(TARGET_I386)
#elif defined(TARGET_ARM)
cs_base = 0;
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
// Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
#else
// FPU enable . MMU enabled . MMU no-fault . Supervisor
#endif
#elif defined(TARGET_PPC)
cs_base = 0;
#elif defined(TARGET_MIPS)
cs_base = 0;
#elif defined(TARGET_M68K)
cs_base = 0;
#elif defined(TARGET_SH4)
cs_base = 0; /* XXXXX */
#else
#endif
/* Note: we do it here to avoid a gcc bug on Mac OS X when
doing it in tb_find_slow */
if (tb_invalidated_flag) {
/* as some TB could have been invalidated because
of memory exceptions while generating the code, we
must recompute the hash index here */
/* T0 = 0 forces the main loop to look up the next TB instead of
following a (possibly stale) direct chained jump. */
T0 = 0;
}
}
return tb;
}
/* main execution loop */
#ifdef VBOX
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
int ret, interrupt_request;
void (*gen_func)(void);
#if defined(TARGET_I386)
/* handle exit of HALTED state */
/* disable halt condition */
} else {
return EXCP_HALTED;
}
}
#elif defined(TARGET_PPC)
} else {
return EXCP_HALTED;
}
}
#elif defined(TARGET_SPARC)
} else {
return EXCP_HALTED;
}
}
#elif defined(TARGET_ARM)
/* An interrupt wakes the CPU even if the I and F CPSR bits are
set. */
if (env1->interrupt_request
& (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
} else {
return EXCP_HALTED;
}
}
#elif defined(TARGET_MIPS)
if (env1->interrupt_request &
} else {
return EXCP_HALTED;
}
}
#endif
/* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(__sparc__) && !defined(HOST_SOLARIS)
/* we also save i7 because longjmp may not restore it */
#endif
#if defined(TARGET_I386)
env_to_regs();
/* put eflags in CPU temporary format */
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
/* XXXXX */
#else
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
#endif
/* prepare setjmp context for exception handling */
for(;;) {
{
/*
* Check for fatal errors first
*/
}
/* if an exception is pending, we execute it here */
if (env->exception_index >= 0) {
/* exit request from the cpu execution loop */
break;
} else {
/* simulate a real cpu exception. On i386, it can
trigger new exceptions, but we do not handle
double or triple faults yet. */
Log(("do_interrupt %d %d %RGv\n", env->exception_index, env->exception_is_int, env->exception_next_eip));
env->exception_next_eip, 0);
}
}
T0 = 0; /* force lookup of first TB */
for(;;)
{
if (__builtin_expect(interrupt_request, 0))
{
/* Single instruction exec request, we execute it and return (one way or the other).
The caller will always reschedule after doing this operation! */
{
/* not in flight are we? (if we are, we trapped) */
{
/* When we receive an external interrupt during execution of this single
instruction, then we should stay here. We will leave when we're ready
for raw-mode or when interrupted by pending EMT requests. */
if ( !(interrupt_request & CPU_INTERRUPT_HARD)
)
{
}
}
/* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
}
if ((interrupt_request & CPU_INTERRUPT_SMI) &&
do_smm_enter();
T0 = 0;
}
else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
{
/* if hardware interrupt pending, we execute it */
int intno;
if (intno >= 0)
{
}
/* ensure that no TB jump will be modified as
the program flow was changed */
T0 = 0;
}
{
/* ensure that no TB jump will be modified as
the program flow was changed */
T0 = 0;
}
{
}
{
}
}
/*
* Check if we the CPU state allows us to execute the code in raw-mode.
*/
if (remR3CanExecuteRaw(env,
&env->exception_index))
{
}
tb = tb_find_fast();
/* see if we can patch the calling TB. When the TB
spans two pages, we cannot safely do a direct
jump. */
if (T0 != 0
{
}
/* execute the generated code */
#if !defined(DEBUG_bird)
if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
{
{
Log(("EMR0: %RGv ESP=%RGv IF=%d TF=%d CPL=%d\n", env->eip, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3));
}
}
else
if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
{
{
{
Log(("EMV86: %04X:%RGv IF=%d TF=%d CPL=%d CR0=%RGr\n", env->segs[R_CS].selector, env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, env->cr[0]));
}
else
{
Log(("EMR3: %RGv ESP=%RGv IF=%d TF=%d CPL=%d IOPL=%d CR0=%RGr\n", env->eip, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, ((env->eflags >> IOPL_SHIFT) & 3), env->cr[0]));
}
}
}
else
{
/* Seriously slows down realmode booting. */
LogFlow(("EMRM: %04X:%RGv SS:ESP=%04X:%RGv IF=%d TF=%d CPL=%d PE=%d PG=%d\n", env->segs[R_CS].selector, env->eip, env->segs[R_SS].selector, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, env->cr[0] & X86_CR0_PE, env->cr[0] & X86_CR0_PG));
}
#endif /* !DEBUG_bird */
{
#ifdef DEBUG_bird
static int s_cTimes = 0;
if (s_cTimes++ > 1000000)
{
RTLogPrintf("Enough stepping!\n");
#if 0
#else
#endif
}
#endif
{
}
}
else
{
gen_func();
}
#else /* !DEBUG || !VBOX || DEBUG_dmik */
gen_func();
#endif /* !DEBUG || !VBOX || DEBUG_dmik */
/* reset soft MMU for next block (it can currently
only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
/* do not allow linking to another block */
T0 = 0;
}
#endif
}
} else {
env_to_regs();
}
#ifdef VBOX_HIGH_RES_TIMERS_HACK
/* NULL the current_tb here so cpu_interrupt() doesn't do
anything unnecessary (like crashing during emulate single instruction). */
#endif
} /* for(;;) */
#if defined(TARGET_I386)
/* restore flags in standard format */
#else
#endif
#include "hostregs_helper.h"
return ret;
}
#else /* !VBOX */
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(__sparc__) && !defined(HOST_SOLARIS)
int saved_i7;
#endif
int ret, interrupt_request;
void (*gen_func)(void);
#if defined(TARGET_I386)
/* handle exit of HALTED state */
/* disable halt condition */
} else {
return EXCP_HALTED;
}
}
#elif defined(TARGET_PPC)
} else {
return EXCP_HALTED;
}
}
#elif defined(TARGET_SPARC)
} else {
return EXCP_HALTED;
}
}
#elif defined(TARGET_ARM)
/* An interrupt wakes the CPU even if the I and F CPSR bits are
set. */
if (env1->interrupt_request
& (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
} else {
return EXCP_HALTED;
}
}
#elif defined(TARGET_MIPS)
if (env1->interrupt_request &
} else {
return EXCP_HALTED;
}
}
#endif
/* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(__sparc__) && !defined(HOST_SOLARIS)
/* we also save i7 because longjmp may not restore it */
#endif
#if defined(TARGET_I386)
env_to_regs();
/* put eflags in CPU temporary format */
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
/* XXXXX */
#else
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
#endif
/* prepare setjmp context for exception handling */
for(;;) {
#ifdef VBOX
/* Check for high priority requests first (like fatal
errors). */
}
#endif /* VBOX */
/* if an exception is pending, we execute it here */
if (env->exception_index >= 0) {
/* exit request from the cpu execution loop */
break;
} else if (env->user_mode_only) {
/* if user mode only, we simulate a fake exception
which will be handled outside the cpu execution
loop */
#if defined(TARGET_I386)
#endif
break;
} else {
#if defined(TARGET_I386)
/* simulate a real cpu exception. On i386, it can
trigger new exceptions, but we do not handle
double or triple faults yet. */
env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_ARM)
#elif defined(TARGET_SH4)
#endif
}
}
#ifdef USE_KQEMU
int ret;
/* put eflags in CPU temporary format */
if (ret == 1) {
/* exception */
} else if (ret == 2) {
/* softmmu execution needed */
} else {
if (env->interrupt_request != 0) {
/* hardware interrupt will be executed just after */
} else {
/* otherwise, we restart */
}
}
}
#endif
T0 = 0; /* force lookup of first TB */
for(;;) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
/* g1 can be modified by some libc? functions */
#endif
if (__builtin_expect(interrupt_request, 0)) {
#ifdef VBOX
/* Single instruction exec request, we execute it and return (one way or the other).
The caller will always reschedule after doing this operation! */
{
/* not in flight are we? */
{
/* When we receive an external interrupt during execution of this single
instruction, then we should stay here. We will leave when we're ready
for raw-mode or when interrupted by pending EMT requests. */
if ( !(interrupt_request & CPU_INTERRUPT_HARD)
)
{
}
}
}
#endif /* VBOX */
#if defined(TARGET_I386)
if ((interrupt_request & CPU_INTERRUPT_SMI) &&
do_smm_enter();
#if defined(__sparc__) && !defined(HOST_SOLARIS)
tmp_T0 = 0;
#else
T0 = 0;
#endif
} else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
int intno;
#if defined(VBOX)
#else
#endif
if (loglevel & CPU_LOG_TB_IN_ASM) {
}
#if defined(VBOX)
if (intno >= 0)
#endif
/* ensure that no TB jump will be modified as
the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
tmp_T0 = 0;
#else
T0 = 0;
#endif
}
#elif defined(TARGET_PPC)
#if 0
if ((interrupt_request & CPU_INTERRUPT_RESET)) {
}
#endif
if (msr_ee != 0) {
if ((interrupt_request & CPU_INTERRUPT_HARD)) {
/* Raise it */
env->error_code = 0;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
tmp_T0 = 0;
#else
T0 = 0;
#endif
} else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
/* Raise it */
env->error_code = 0;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
tmp_T0 = 0;
#else
T0 = 0;
#endif
}
}
#elif defined(TARGET_MIPS)
if ((interrupt_request & CPU_INTERRUPT_HARD) &&
/* Raise it */
env->error_code = 0;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
tmp_T0 = 0;
#else
T0 = 0;
#endif
}
#elif defined(TARGET_SPARC)
if ((interrupt_request & CPU_INTERRUPT_HARD) &&
env->interrupt_index = 0;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
tmp_T0 = 0;
#else
T0 = 0;
#endif
}
} else if (interrupt_request & CPU_INTERRUPT_TIMER) {
//do_interrupt(0, 0, 0, 0, 0);
} else if (interrupt_request & CPU_INTERRUPT_HALT) {
}
#elif defined(TARGET_ARM)
}
}
#elif defined(TARGET_SH4)
/* XXXXX */
#endif
/* Don't use the cached interupt_request value,
do_interrupt may have updated the EXITTB flag. */
#if defined(VBOX)
#else
#endif
/* ensure that no TB jump will be modified as
the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
tmp_T0 = 0;
#else
T0 = 0;
#endif
}
#ifdef VBOX
#endif
if (interrupt_request & CPU_INTERRUPT_EXIT) {
#if defined(VBOX)
#else
#endif
}
#if defined(VBOX)
if (interrupt_request & CPU_INTERRUPT_RC) {
}
#endif
}
#ifdef DEBUG_EXEC
if ((loglevel & CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
/* restore flags in standard format */
#ifdef reg_EAX
#endif
#ifdef reg_EBX
#endif
#ifdef reg_ECX
#endif
#ifdef reg_EDX
#endif
#ifdef reg_ESI
#endif
#ifdef reg_EDI
#endif
#ifdef reg_EBP
#endif
#ifdef reg_ESP
#endif
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#else
#endif
}
#endif
#ifdef VBOX
/*
* Check if we the CPU state allows us to execute the code in raw-mode.
*/
if (remR3CanExecuteRaw(env,
{
}
#endif /* VBOX */
tb = tb_find_fast();
#ifdef DEBUG_EXEC
if ((loglevel & CPU_LOG_EXEC)) {
}
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#endif
/* see if we can patch the calling TB. When the TB
spans two pages, we cannot safely do a direct
jump. */
{
if (T0 != 0 &&
#if USE_KQEMU
#endif
#ifdef VBOX
#endif
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
#endif
) {
#if defined(USE_CODE_COPY)
/* propagates the FP use info */
#endif
}
}
/* execute the generated code */
#if defined(__sparc__)
"mov %%o7,%%i0"
: /* no outputs */
: "r" (gen_func)
: "i0", "i1", "i2", "i3", "i4", "i5",
"l0", "l1", "l2", "l3", "l4", "l5",
"l6", "l7");
asm volatile ("mov pc, %0\n\t"
".global exec_loop\n\t"
"exec_loop:\n\t"
: /* no outputs */
: "r" (gen_func)
: "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
{
}
gen_func();
} else {
}
/* we work with native eflags */
asm(".globl exec_loop\n"
"\n"
"debug1:\n"
" pushl %%ebp\n"
" fs movl %10, %9\n"
" fs movl %11, %%eax\n"
" andl $0x400, %%eax\n"
" fs orl %8, %%eax\n"
" pushl %%eax\n"
" popf\n"
" fs movl %%esp, %12\n"
" fs movl %0, %%eax\n"
" fs movl %1, %%ecx\n"
" fs movl %2, %%edx\n"
" fs movl %3, %%ebx\n"
" fs movl %4, %%esp\n"
" fs movl %5, %%ebp\n"
" fs movl %6, %%esi\n"
" fs movl %7, %%edi\n"
" fs jmp *%9\n"
"exec_loop:\n"
" fs movl %%esp, %4\n"
" fs movl %12, %%esp\n"
" fs movl %%eax, %0\n"
" fs movl %%ecx, %1\n"
" fs movl %%edx, %2\n"
" fs movl %%ebx, %3\n"
" fs movl %%ebp, %5\n"
" fs movl %%esi, %6\n"
" fs movl %%edi, %7\n"
" pushf\n"
" popl %%eax\n"
" movl %%eax, %%ecx\n"
" andl $0x400, %%ecx\n"
" shrl $9, %%ecx\n"
" andl $0x8d5, %%eax\n"
" fs movl %%eax, %8\n"
" movl $1, %%eax\n"
" subl %%ecx, %%eax\n"
" fs movl %%eax, %11\n"
" fs movl %9, %%ebx\n" /* get T0 value */
" popl %%ebp\n"
:
"a" (gen_func),
: "%ecx", "%edx"
);
}
}
struct fptr {
void *ip;
void *gp;
} fp;
(*(void (*)(void)) &fp)();
#else
#if !defined(DEBUG_bird)
if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
{
{
Log(("EMR0: %RGv IF=%d TF=%d CPL=%d\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3));
}
}
else
if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
{
{
{
Log(("EMV86: %RGv IF=%d TF=%d CPL=%d flags=%08X CR0=%RGr\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, flags, env->cr[0]));
}
else
{
Log(("EMR3: %RGv IF=%d TF=%d CPL=%d IOPL=%d flags=%08X CR0=%RGr\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, ((env->eflags >> IOPL_SHIFT) & 3), flags, env->cr[0]));
}
}
}
#endif /* !DEBUG_bird */
{
#ifdef DEBUG_bird
static int s_cTimes = 0;
{
RTLogPrintf("Enough stepping!\n");
#if 0
#else
#endif
}
#endif
{
}
}
else
{
gen_func();
}
#else /* !DEBUG || !VBOX || DEBUG_dmik */
#ifdef VBOX
gen_func();
#else /* !VBOX */
gen_func();
#endif /* !VBOX */
#endif /* !DEBUG || !VBOX || DEBUG_dmik */
#endif
/* reset soft MMU for next block (it can currently
only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
/* do not allow linking to another block */
T0 = 0;
}
#endif
#if defined(USE_KQEMU)
if (kqemu_is_ok(env) &&
}
#endif
}
} else {
env_to_regs();
}
} /* for(;;) */
#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
if (env->native_fp_regs) {
}
#endif
/* restore flags in standard format */
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
/* XXXXX */
#else
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#endif
#include "hostregs_helper.h"
/* fail safe : never use cpu_single_env outside cpu_exec() */
return ret;
}
#endif /* !VBOX */
/* must only be called from the generated code as an exception can be
generated */
{
/* XXX: cannot enable it yet because it yields to MMU exception
where NIP != read address on PowerPC */
#if 0
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
{
env = s;
selector &= 0xffff;
} else {
}
}
{
env = s;
}
{
env = s;
}
#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)
#if defined(TARGET_I386)
/* 'pc' is the host PC at which the exception was raised. 'address' is
the effective address of the memory exception. 'is_write' is 1 if a
write caused the exception and otherwise 0'. 'old_set' is the
signal set which should be restored */
void *puc)
{
int ret;
if (cpu_single_env)
#if defined(DEBUG_SIGNAL)
qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
#endif
/* XXX: locking issue */
return 1;
}
/* see if it is an MMU fault */
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
if (ret == 1) {
#if 0
printf("PF exception: EIP=0x%RGv CR2=0x%RGv error=0x%x\n",
#endif
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
} else {
/* activate soft MMU for this block */
}
/* never comes here */
return 1;
}
#elif defined(TARGET_ARM)
void *puc)
{
int ret;
if (cpu_single_env)
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
#endif
/* XXX: locking issue */
return 1;
}
/* see if it is an MMU fault */
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
}
#elif defined(TARGET_SPARC)
void *puc)
{
int ret;
if (cpu_single_env)
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
#endif
/* XXX: locking issue */
return 1;
}
/* see if it is an MMU fault */
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
}
#elif defined (TARGET_PPC)
void *puc)
{
int ret;
if (cpu_single_env)
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
#endif
/* XXX: locking issue */
return 1;
}
/* see if it is an MMU fault */
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
if (ret == 1) {
#if 0
printf("PF exception: NIP=0x%08x error=0x%x %p\n",
#endif
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
} else {
/* activate soft MMU for this block */
}
/* never comes here */
return 1;
}
#elif defined(TARGET_M68K)
void *puc)
{
int ret;
if (cpu_single_env)
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
#endif
/* XXX: locking issue */
return 1;
}
/* see if it is an MMU fault */
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
/* never comes here */
return 1;
}
#elif defined (TARGET_MIPS)
void *puc)
{
int ret;
if (cpu_single_env)
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
#endif
/* XXX: locking issue */
return 1;
}
/* see if it is an MMU fault */
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
if (ret == 1) {
#if 0
printf("PF exception: NIP=0x%08x error=0x%x %p\n",
#endif
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
} else {
/* activate soft MMU for this block */
}
/* never comes here */
return 1;
}
#elif defined (TARGET_SH4)
void *puc)
{
int ret;
if (cpu_single_env)
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
#endif
/* XXX: locking issue */
return 1;
}
/* see if it is an MMU fault */
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
#if 0
printf("PF exception: NIP=0x%08x error=0x%x %p\n",
#endif
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
/* never comes here */
return 1;
}
#else
#endif
#if defined(__i386__)
#if defined(USE_CODE_COPY)
{
if (cpu_single_env)
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
}
#endif
void *puc)
{
unsigned long pc;
int trapno;
#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_TRAPNO TRAPNO
#endif
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
/* send division by zero or bound exception */
return 1;
} else
#endif
trapno == 0xe ?
}
#elif defined(__x86_64__)
void *puc)
{
unsigned long pc;
}
#elif defined(__powerpc__)
/***********************************************************************
* signal context platform-specific definitions
* From Wine
*/
#ifdef linux
/* All Registers access - only for local access */
/* Gpr Registers access */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
/* Exception Registers access */
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
/* Gpr Registers access */
/* Float Registers access */
/* Exception Registers access */
#endif /* __APPLE__ */
void *puc)
{
unsigned long pc;
int is_write;
is_write = 0;
#if 0
/* ppc 4xx case */
is_write = 1;
#else
is_write = 1;
#endif
}
void *puc)
{
int is_write = 0;
/* XXX: need kernel patch to get write flag faster */
switch (insn >> 26) {
case 0x0d: // stw
case 0x0e: // stb
case 0x0f: // stq_u
case 0x24: // stf
case 0x25: // stg
case 0x26: // sts
case 0x27: // stt
case 0x2c: // stl
case 0x2d: // stq
case 0x2e: // stl_c
case 0x2f: // stq_c
is_write = 1;
}
}
void *puc)
{
unsigned long pc;
int is_write;
/* XXX: is there a standard glibc define ? */
/* XXX: need kernel patch to get write flag faster */
is_write = 0;
case 0x05: // stb
case 0x06: // sth
case 0x04: // st
case 0x07: // std
case 0x24: // stf
case 0x27: // stdf
case 0x25: // stfsr
is_write = 1;
break;
}
}
}
void *puc)
{
unsigned long pc;
int is_write;
/* XXX: compute is_write */
is_write = 0;
}
void *puc)
{
unsigned long pc;
int is_write;
/* XXX: compute is_write */
is_write = 0;
}
#ifndef __ISR_VALID
# define __ISR_VALID 1
#endif
{
unsigned long ip;
int is_write = 0;
switch (host_signum) {
case SIGILL:
case SIGFPE:
case SIGSEGV:
case SIGBUS:
case SIGTRAP:
/* ISR.W (write-access) is bit 33: */
break;
default:
break;
}
}
void *puc)
{
unsigned long pc;
int is_write;
/* XXX: compute is_write */
is_write = 0;
}
#else
#endif
#endif /* !defined(CONFIG_SOFTMMU) */