/* cpu-exec.c revision 9fb85b14bb8b3ebb26e2a1307caf67ca8040f171 */
/*
* i386 emulator main execution loop
*
* Copyright (c) 2003-2005 Fabrice Bellard
*
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
* other than GPL or LGPL is available it will apply instead, Sun elects to use only
* the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
* a choice of LGPL license versions is made available with the language indicating
* that LGPLv2 or any later version may be used, or where a choice of which version
* of the LGPL is applied is otherwise unspecified.
*/
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#if !defined(CONFIG_SOFTMMU)
#include <signal.h>
#include <sys/ucontext.h>
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#define env cpu_single_env
#endif
int tb_invalidated_flag;
//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
/* Abort execution of the current translated block and return control to
   the main execution loop in cpu_exec().
   NOTE(review): the upstream QEMU implementation follows regs_to_env()
   with longjmp(env->jmp_env, 1); that call is not visible here — lines
   appear to be missing from this extraction, confirm against the
   original cpu-exec.c. */
void cpu_loop_exit(void)
{
/* NOTE: the registers at this point must be saved by hand because
longjmp restores them */
regs_to_env();
}
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
restored in a state compatible with the CPU emulator
*/
{
#if !defined(CONFIG_SOFTMMU)
#endif
/* XXX: restore cpu registers saved in host registers */
#if !defined(CONFIG_SOFTMMU)
if (puc) {
/* XXX: use siglongjmp ? */
}
#endif
}
/* Execute the code without caching the generated code. An interpreter
could be used if available. */
{
unsigned long next_tb;
/* Should never happen.
We only end up here when an existing TB is too long. */
if (max_cycles > CF_COUNT_MASK)
/* execute the generated code */
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
#else
#endif
/* Restore PC. This may happen if async event occurs before
the TB starts executing. */
}
}
{
unsigned int h;
tb_invalidated_flag = 0;
regs_to_env(); /* XXX: do it just before cpu_gen_code() */
/* find translated block using physical mappings */
phys_page2 = -1;
h = tb_phys_hash_func(phys_pc);
ptb1 = &tb_phys_hash[h];
for(;;) {
if (!tb)
goto not_found;
/* check next page if needed */
goto found;
} else {
goto found;
}
}
}
/* if no translated code available, then translate it now */
/* we add the TB in the virtual pc hash table */
return tb;
}
#ifndef VBOX
static inline TranslationBlock *tb_find_fast(void)
#else
#endif
{
/* we record a subset of the CPU state. It will
always be the same before a given translated block
is executed. */
#if defined(TARGET_I386)
#elif defined(TARGET_ARM)
cs_base = 0;
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
// AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
#else
// FPU enable . Supervisor
#endif
#elif defined(TARGET_PPC)
cs_base = 0;
#elif defined(TARGET_MIPS)
cs_base = 0;
#elif defined(TARGET_M68K)
cs_base = 0;
#elif defined(TARGET_SH4)
cs_base = 0;
#elif defined(TARGET_ALPHA)
cs_base = 0;
#elif defined(TARGET_CRIS)
cs_base = 0;
#else
#endif
}
return tb;
}
/* main execution loop */
#ifdef VBOX
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
int ret = 0, interrupt_request;
unsigned long next_tb;
/* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
env_to_regs();
#if defined(TARGET_I386)
/* put eflags in CPU temporary format */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
/* XXXXX */
#else
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
#endif
/* prepare setjmp context for exception handling */
for(;;) {
{
/*
* Check for fatal errors first
*/
}
/* if an exception is pending, we execute it here */
if (env->exception_index >= 0) {
/* exit request from the cpu execution loop */
break;
} else {
/* simulate a real cpu exception. On i386, it can
trigger new exceptions, but we do not handle
double or triple faults yet. */
Log(("do_interrupt %d %d %RGv\n", env->exception_index, env->exception_is_int, env->exception_next_eip));
env->exception_next_eip, 0);
/* successfully delivered */
}
}
next_tb = 0; /* force lookup of first TB */
for(;;)
{
#ifndef VBOX
if (__builtin_expect(interrupt_request, 0))
#else
if (RT_UNLIKELY(interrupt_request != 0))
#endif
{
/** @todo: reconcile with what QEMU really does */
/* Single instruction exec request, we execute it and return (one way or the other).
The caller will always reschedule after doing this operation! */
{
/* not in flight are we? (if we are, we trapped) */
{
/* When we receive an external interrupt during execution of this single
instruction, then we should stay here. We will leave when we're ready
for raw-mode or when interrupted by pending EMT requests. */
if ( !(interrupt_request & CPU_INTERRUPT_HARD)
)
{
}
}
/* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
}
if ((interrupt_request & CPU_INTERRUPT_SMI) &&
do_smm_enter();
next_tb = 0;
}
else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
{
/* if hardware interrupt pending, we execute it */
int intno;
if (intno >= 0)
{
}
/* ensure that no TB jump will be modified as
the program flow was changed */
next_tb = 0;
}
{
/* ensure that no TB jump will be modified as
the program flow was changed */
next_tb = 0;
}
{
}
{
}
}
/*
* Check if the CPU state allows us to execute the code in raw-mode.
*/
if (remR3CanExecuteRaw(env,
&env->exception_index))
{
}
tb = tb_find_fast();
/* Note: we do it here to avoid a gcc bug on Mac OS X when
doing it in tb_find_slow */
if (tb_invalidated_flag) {
/* as some TB could have been invalidated because
of memory exceptions while generating the code, we
must recompute the hash index here */
next_tb = 0;
tb_invalidated_flag = 0;
}
/* see if we can patch the calling TB. When the TB
spans two pages, we cannot safely do a direct
jump. */
if (next_tb != 0
{
}
while (env->current_tb) {
/* execute the generated code */
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
#else
#endif
/* Instruction counter expired. */
int insns_left;
/* Restore PC. */
/* Refill decrementer and continue execution. */
insns_left = 0xffff;
} else {
}
} else {
if (insns_left > 0) {
/* Execute remaining instructions. */
}
next_tb = 0;
}
}
}
/* reset soft MMU for next block (it can currently
only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
/* do not allow linking to another block */
next_tb = 0;
}
#endif
} /* for(;;) */
} else {
env_to_regs();
}
#ifdef VBOX_HIGH_RES_TIMERS_HACK
/* NULL the current_tb here so cpu_interrupt() doesn't do
anything unnecessary (like crashing during emulate single instruction). */
* anymore, see #3883 */
#endif
} /* for(;;) */
#if defined(TARGET_I386)
/* restore flags in standard format */
#else
#endif
#include "hostregs_helper.h"
return ret;
}
#else /* !VBOX */
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
int ret, interrupt_request;
unsigned long next_tb;
return EXCP_HALTED;
/* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
env_to_regs();
#if defined(TARGET_I386)
/* put eflags in CPU temporary format */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
/* XXXXX */
#else
#endif
/* prepare setjmp context for exception handling */
for(;;) {
/* if an exception is pending, we execute it here */
if (env->exception_index >= 0) {
/* exit request from the cpu execution loop */
break;
} else if (env->user_mode_only) {
/* if user mode only, we simulate a fake exception
which will be handled outside the cpu execution
loop */
#if defined(TARGET_I386)
/* successfully delivered */
#endif
break;
} else {
#if defined(TARGET_I386)
/* simulate a real cpu exception. On i386, it can
trigger new exceptions, but we do not handle
double or triple faults yet. */
env->exception_next_eip, 0);
/* successfully delivered */
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_ARM)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_M68K)
do_interrupt(0);
#endif
}
}
#ifdef USE_KQEMU
int ret;
/* put eflags in CPU temporary format */
if (ret == 1) {
/* exception */
} else if (ret == 2) {
/* softmmu execution needed */
} else {
if (env->interrupt_request != 0) {
/* hardware interrupt will be executed just after */
} else {
/* otherwise, we restart */
}
}
}
#endif
next_tb = 0; /* force lookup of first TB */
for(;;) {
if (unlikely(interrupt_request) &&
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
}
if (interrupt_request & CPU_INTERRUPT_HALT) {
}
#endif
#if defined(TARGET_I386)
if ((interrupt_request & CPU_INTERRUPT_SMI) &&
do_smm_enter();
next_tb = 0;
} else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
next_tb = 0;
} else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
int intno;
if (loglevel & CPU_LOG_TB_IN_ASM) {
}
/* ensure that no TB jump will be modified as
the program flow was changed */
next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
} else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
int intno;
/* FIXME: this should respect TPR */
if (loglevel & CPU_LOG_TB_IN_ASM)
next_tb = 0;
#endif
}
}
#elif defined(TARGET_PPC)
#if 0
if ((interrupt_request & CPU_INTERRUPT_RESET)) {
}
#endif
if (interrupt_request & CPU_INTERRUPT_HARD) {
if (env->pending_interrupts == 0)
next_tb = 0;
}
#elif defined(TARGET_MIPS)
if ((interrupt_request & CPU_INTERRUPT_HARD) &&
/* Raise it */
env->error_code = 0;
next_tb = 0;
}
#elif defined(TARGET_SPARC)
if ((interrupt_request & CPU_INTERRUPT_HARD) &&
env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
#endif
next_tb = 0;
}
} else if (interrupt_request & CPU_INTERRUPT_TIMER) {
//do_interrupt(0, 0, 0, 0, 0);
}
#elif defined(TARGET_ARM)
next_tb = 0;
}
/* ARMv7-M interrupt return works by loading a magic value
into the PC. On real hardware the load causes the
return to occur. The qemu implementation performs the
jump normally, then does the exception return when the
CPU tries to execute code at the magic address.
This will cause the magic PC value to be pushed to
the stack if an interrupt occurred at the wrong time.
We avoid this by disabling interrupts when
pc contains a magic address. */
next_tb = 0;
}
#elif defined(TARGET_SH4)
if (interrupt_request & CPU_INTERRUPT_HARD) {
next_tb = 0;
}
#elif defined(TARGET_ALPHA)
if (interrupt_request & CPU_INTERRUPT_HARD) {
next_tb = 0;
}
#elif defined(TARGET_CRIS)
next_tb = 0;
}
next_tb = 0;
}
#elif defined(TARGET_M68K)
< env->pending_level) {
/* Real hardware gets the interrupt vector via an
IACK cycle at this point. Current emulated
hardware doesn't rely on this, so we
first signalled. */
do_interrupt(1);
next_tb = 0;
}
#endif
/* Don't use the cached interrupt_request value,
do_interrupt may have updated the EXITTB flag. */
/* ensure that no TB jump will be modified as
the program flow was changed */
next_tb = 0;
}
if (interrupt_request & CPU_INTERRUPT_EXIT) {
}
}
#ifdef DEBUG_EXEC
if ((loglevel & CPU_LOG_TB_CPU)) {
/* restore flags in standard format */
regs_to_env();
#if defined(TARGET_I386)
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#else
#endif
}
#endif
tb = tb_find_fast();
/* Note: we do it here to avoid a gcc bug on Mac OS X when
doing it in tb_find_slow */
if (tb_invalidated_flag) {
/* as some TB could have been invalidated because
of memory exceptions while generating the code, we
must recompute the hash index here */
next_tb = 0;
tb_invalidated_flag = 0;
}
#ifdef DEBUG_EXEC
if ((loglevel & CPU_LOG_EXEC)) {
}
#endif
/* see if we can patch the calling TB. When the TB
spans two pages, we cannot safely do a direct
jump. */
{
if (next_tb != 0 &&
#ifdef USE_KQEMU
#endif
}
}
while (env->current_tb) {
/* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#define env cpu_single_env
#endif
/* Instruction counter expired. */
int insns_left;
/* Restore PC. */
/* Refill decrementer and continue execution. */
insns_left = 0xffff;
} else {
}
} else {
if (insns_left > 0) {
/* Execute remaining instructions. */
}
next_tb = 0;
}
}
}
/* reset soft MMU for next block (it can currently
only be set by a memory fault) */
#if defined(USE_KQEMU)
if (kqemu_is_ok(env) &&
}
#endif
} /* for(;;) */
} else {
env_to_regs();
}
} /* for(;;) */
#if defined(TARGET_I386)
/* restore flags in standard format */
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
/* XXXXX */
#else
#endif
/* restore global registers */
#include "hostregs_helper.h"
/* fail safe : never use cpu_single_env outside cpu_exec() */
return ret;
}
#endif /* !VBOX */
/* must only be called from the generated code as an exception can be
generated */
{
/* XXX: cannot enable it yet because it yields to MMU exception
where NIP != read address on PowerPC */
#if 0
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
{
env = s;
selector &= 0xffff;
} else {
}
}
{
env = s;
}
{
env = s;
}
#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)
#if defined(TARGET_I386)
/* 'pc' is the host PC at which the exception was raised. 'address' is
the effective address of the memory exception. 'is_write' is 1 if a
write caused the exception and otherwise 0'. 'old_set' is the
signal set which should be restored */
void *puc)
{
int ret;
if (cpu_single_env)
#if defined(DEBUG_SIGNAL)
qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
#endif
/* XXX: locking issue */
return 1;
}
/* see if it is an MMU fault */
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
if (ret == 1) {
#if 0
printf("PF exception: EIP=0x%RGv CR2=0x%RGv error=0x%x\n",
#endif
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
} else {
/* activate soft MMU for this block */
}
/* never comes here */
return 1;
}
#elif defined(TARGET_ARM)
void *puc)
{
int ret;
if (cpu_single_env)
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
#endif
/* XXX: locking issue */
return 1;
}
/* see if it is an MMU fault */
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
}
#elif defined(TARGET_SPARC)
void *puc)
{
int ret;
if (cpu_single_env)
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
#endif
/* XXX: locking issue */
return 1;
}
/* see if it is an MMU fault */
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
}
#elif defined (TARGET_PPC)
void *puc)
{
int ret;
if (cpu_single_env)
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
#endif
/* XXX: locking issue */
return 1;
}
/* see if it is an MMU fault */
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
if (ret == 1) {
#if 0
printf("PF exception: NIP=0x%08x error=0x%x %p\n",
#endif
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
} else {
/* activate soft MMU for this block */
}
/* never comes here */
return 1;
}
#elif defined(TARGET_M68K)
void *puc)
{
int ret;
if (cpu_single_env)
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
#endif
/* XXX: locking issue */
return 1;
}
/* see if it is an MMU fault */
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
/* never comes here */
return 1;
}
#elif defined (TARGET_MIPS)
void *puc)
{
int ret;
if (cpu_single_env)
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
#endif
/* XXX: locking issue */
return 1;
}
/* see if it is an MMU fault */
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
if (ret == 1) {
#if 0
printf("PF exception: NIP=0x%08x error=0x%x %p\n",
#endif
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
} else {
/* activate soft MMU for this block */
}
/* never comes here */
return 1;
}
#elif defined (TARGET_SH4)
void *puc)
{
int ret;
if (cpu_single_env)
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
#endif
/* XXX: locking issue */
return 1;
}
/* see if it is an MMU fault */
if (ret < 0)
return 0; /* not an MMU fault */
if (ret == 0)
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
}
#if 0
printf("PF exception: NIP=0x%08x error=0x%x %p\n",
#endif
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
/* never comes here */
return 1;
}
#else
#endif
#if defined(__i386__)
#if defined(__APPLE__)
# include <sys/ucontext.h>
#else
#endif
void *puc)
{
unsigned long pc;
int trapno;
#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_TRAPNO TRAPNO
#endif
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
/* send division by zero or bound exception */
return 1;
} else
#endif
trapno == 0xe ?
}
#elif defined(__x86_64__)
void *puc)
{
unsigned long pc;
}
#elif defined(__powerpc__)
/***********************************************************************
* signal context platform-specific definitions
* From Wine
*/
#ifdef linux
/* All Registers access - only for local access */
/* Gpr Registers access */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
/* Exception Registers access */
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
/* Gpr Registers access */
/* Float Registers access */
/* Exception Registers access */
#endif /* __APPLE__ */
void *puc)
{
unsigned long pc;
int is_write;
is_write = 0;
#if 0
/* ppc 4xx case */
is_write = 1;
#else
is_write = 1;
#endif
}
void *puc)
{
int is_write = 0;
/* XXX: need kernel patch to get write flag faster */
switch (insn >> 26) {
case 0x0d: // stw
case 0x0e: // stb
case 0x0f: // stq_u
case 0x24: // stf
case 0x25: // stg
case 0x26: // sts
case 0x27: // stt
case 0x2c: // stl
case 0x2d: // stq
case 0x2e: // stl_c
case 0x2f: // stq_c
is_write = 1;
}
}
void *puc)
{
unsigned long pc;
int is_write;
/* XXX: is there a standard glibc define ? */
/* XXX: need kernel patch to get write flag faster */
is_write = 0;
case 0x05: // stb
case 0x06: // sth
case 0x04: // st
case 0x07: // std
case 0x24: // stf
case 0x27: // stdf
case 0x25: // stfsr
is_write = 1;
break;
}
}
}
void *puc)
{
unsigned long pc;
int is_write;
/* XXX: compute is_write */
is_write = 0;
}
void *puc)
{
unsigned long pc;
int is_write;
/* XXX: compute is_write */
is_write = 0;
}
#ifndef __ISR_VALID
# define __ISR_VALID 1
#endif
{
unsigned long ip;
int is_write = 0;
switch (host_signum) {
case SIGILL:
case SIGFPE:
case SIGSEGV:
case SIGBUS:
case SIGTRAP:
/* ISR.W (write-access) is bit 33: */
break;
default:
break;
}
}
void *puc)
{
unsigned long pc;
int is_write;
/* XXX: compute is_write */
is_write = 0;
}
#else
#endif
#endif /* !defined(CONFIG_SOFTMMU) */