/* exec-all.h revision 677833bc953b6cb418c701facbdcf4aa18d6c44e */
/*
* internal execution defines for qemu
*
* Copyright (c) 2003 Fabrice Bellard
*
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* allow seeing translation results - the slowdown should be negligible, so we leave it enabled */
#ifndef VBOX
#define DEBUG_DISAS
#endif
#ifdef VBOX
#ifndef LOG_GROUP
#define LOG_GROUP LOG_GROUP_REM
#endif
#include "REMInternal.h"
#endif /* VBOX */
#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s) tostring(s)
#define tostring(s) #s
#endif
#if __GNUC__ < 3
#define __builtin_expect(x, n) (x)
#endif
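/* usage note (not original text): __builtin_expect(expr, 0) marks expr as
   the unlikely case - see get_phys_addr_code() below for an example - and
   this fallback simply drops the hint on pre-3.x GCC. */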
#ifdef __i386__
#define REGPARM(n) __attribute((regparm(n)))
#else
#define REGPARM(n)
#endif
/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
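/* usage sketch (illustrative, not original text): a target translator keeps
   decoding while (dc->is_jmp == DISAS_NEXT) and ends the block at the first
   branch or CPU-state change. */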
struct TranslationBlock;
/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 32
#define OPC_BUF_SIZE 512
extern long gen_labels[OPC_BUF_SIZE];
extern int nb_gen_labels;
typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);
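/* The translated code itself is entered through a pointer of the same shape,
   e.g. (usage sketch, not original text):

       GenOpFunc *gen_func = (GenOpFunc *)tb->tc_ptr;
       gen_func();
*/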
#if defined(TARGET_I386)
void optimize_flags_init(void);
#endif
extern int loglevel;
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
                      int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_exec_init(void);
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access);
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu);
#define CODE_GEN_MAX_SIZE 65536
#define CODE_GEN_HASH_BITS     15
#define CODE_GEN_HASH_SIZE     (1 << CODE_GEN_HASH_BITS)

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)
/* maximum total translated code allocated */
/* NOTE: the translated code area cannot be too big because on some
archs the range of "fast" function calls is limited. Here is a
summary of the ranges:
i386 : signed 32 bits
arm : signed 26 bits
ppc : signed 24 bits
sparc : signed 32 bits
alpha : signed 23 bits
*/
#if defined(__alpha__)
#define CODE_GEN_BUFFER_SIZE     (2 * 1024 * 1024)
#elif defined(__powerpc__)
#define CODE_GEN_BUFFER_SIZE     (6 * 1024 * 1024)
#else
#define CODE_GEN_BUFFER_SIZE     (16 * 1024 * 1024)
#endif
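/* explanatory note (not original text): the buffer must fit inside the
   smallest relative-branch reach listed above - e.g. "signed 24 bits" on
   ppc is roughly +/-8 MB, which is why its buffer is capped at 6 MB. */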
//#define CODE_GEN_BUFFER_SIZE (128 * 1024)
/* estimated block size for TB allocation */
/* XXX: use a per code average code fragment size and modulate it
according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif
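/* companion define from the upstream tree (reconstruction): the number of
   TB slots implied by the buffer size and the average-block estimate above */
#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)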
#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif

#ifdef VBOX /* bird: not safe in next step because of threading & cpu_interrupt. */
#undef USE_DIRECT_JUMP
#endif /* VBOX */
typedef struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    unsigned int flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
#ifdef VBOX
#endif
    uint8_t *tc_ptr;    /* pointer to the translated code */
    struct TranslationBlock *hash_next; /* next matching tb for virtual address */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];
    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    uint32_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;

static inline unsigned int tb_hash_func(target_ulong pc)
{
    return pc & (CODE_GEN_HASH_SIZE - 1);
}

static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
extern TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
extern uint8_t *code_gen_ptr;
/* find a translation block in the translation cache. If not found,
return NULL and the pointer to the last element of the list in pptb */
static inline TranslationBlock *tb_find(TranslationBlock ***pptb,
                                        target_ulong pc, target_ulong cs_base,
                                        unsigned int flags)
{
    TranslationBlock **ptb, *tb;
    unsigned int h;

    h = tb_hash_func(pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb = *ptb;
        if (!tb)
            break;
        if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)
            return tb;
        ptb = &tb->hash_next;
    }
    *pptb = ptb;
    return NULL;
}
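/* usage sketch (illustrative, not original text): on a miss, *pptb points
   at the tail of the hash chain, so the caller can link a freshly
   generated TB without rescanning the list:

       TranslationBlock **ptb, *tb;
       tb = tb_find(&ptb, pc, cs_base, flags);
       if (!tb) {
           tb = tb_alloc(pc);   // allocator lives in exec.c
           ...generate code...
           *ptb = tb;           // append to the chain
       }
*/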
#if defined(USE_DIRECT_JUMP)
#if defined(__powerpc__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    uint32_t val, *ptr;

    /* patch the branch destination */
    ptr = (uint32_t *)jmp_addr;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__i386__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}
#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);
        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
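/* illustrative sketch (not part of the original header): decoding the
   circular jmp list. The low two bits of each link say which field holds
   the next pointer: 0/1 = that TB's jmp_next[0/1], 2 = back at jmp_first.

       TranslationBlock *ptb = tb->jmp_first;
       for (;;) {
           int n = (long)ptb & 3;
           ptb = (TranslationBlock *)((long)ptb & ~3);
           if (n == 2)
               break;            // wrapped around to tb itself
           // ptb branches to tb through its slot n
           ptb = ptb->jmp_next[n];
       }
*/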
#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif
#if defined(_WIN32)
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".section .text\n"
#define ASM_DATA_SECTION ".data\n"
#define ASM_PREVIOUS_SECTION ".text\n"
#else
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".previous\n"
#endif
#define ASM_OP_LABEL_NAME(n, opname) \
    ASM_NAME(__op_label) #n "." ASM_NAME(opname)

#if defined(__powerpc__)
/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (ASM_DATA_SECTION\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "b " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)
#elif defined(__i386__) && defined(USE_DIRECT_JUMP)
/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (".section .data\n"\
                  ASM_OP_LABEL_NAME(n, opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "jmp " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)
#else
/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
#define GOTO_TB(opname, tbparam, n)\
do {\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    static void __attribute__((unused)) *__op_label ## n \
        __asm__(ASM_OP_LABEL_NAME(n, opname)) = &&label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n: ;\
dummy_label ## n: ;\
} while (0)
#endif
/* XXX: will be suppressed */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    GOTO_TB(opname, tbparam, n);\
    T0 = (long)(tbparam) + (n);\
    EIP = (int32_t)eip;\
    EXIT_TB();\
} while (0)
extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];
#ifdef __powerpc__
static inline int testandset (int *p)
{
int ret;
"0: lwarx %0,0,%1\n"
" xor. %0,%3,%0\n"
" bne 1f\n"
" stwcx. %2,0,%1\n"
" bne- 0b\n"
"1: "
: "=&r" (ret)
: "r" (p), "r" (1), "r" (0)
: "cr0", "memory");
return ret;
}
#endif
#ifdef __i386__
static inline int testandset (int *p)
{
long int readval = 0;
: "r" (1)
: "cc");
return readval;
}
#endif
#ifdef __x86_64__
static inline int testandset (int *p)
{
long int readval = 0;
: "r" (1)
: "cc");
return readval;
}
#endif
#ifdef __s390__
static inline int testandset (int *p)
{
int ret;
" jl 0b"
: "=&d" (ret)
: "r" (1), "a" (p), "0" (*p)
: "cc", "memory" );
return ret;
}
#endif
#ifdef __alpha__
static inline int testandset (int *p)
{
int ret;
unsigned long one;
" ldl_l %0,%1\n"
" stl_c %2,%1\n"
" beq %2,1f\n"
".subsection 2\n"
"1: br 0b\n"
".previous"
: "m" (*p));
return ret;
}
#endif
#ifdef __sparc__
static inline int testandset (int *p)
{
int ret;
: "=r" (ret)
: "r" (p)
: "memory");
return (ret ? 1 : 0);
}
#endif
#ifdef __arm__
static inline int testandset (int *spinlock)
{
register unsigned int ret;
: "=r"(ret)
return ret;
}
#endif
#ifdef __mc68000
static inline int testandset (int *p)
{
char ret;
: "=r" (ret)
: "m" (p)
: "cc","memory");
return ret;
}
#endif
typedef int spinlock_t;
#define SPIN_LOCK_UNLOCKED 0
#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif
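/* usage sketch (illustrative, not original text): tb_lock below guards the
   TB lists while new code is generated:

       spin_lock(&tb_lock);
       ...look up or generate a TB, link it into tb_hash/tb_phys_hash...
       spin_unlock(&tb_lock);
*/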
extern spinlock_t tb_lock;
extern int tb_invalidated_flag;
#if !defined(CONFIG_USER_ONLY)
void tlb_fill(target_ulong addr, int is_write, int is_user,
              void *retaddr);
#define ACCESS_TYPE 3
#define MEMSUFFIX _code
#define env cpu_single_env
#define DATA_SIZE 1
#include "softmmu_header.h"
#define DATA_SIZE 2
#include "softmmu_header.h"
#define DATA_SIZE 4
#include "softmmu_header.h"
#define DATA_SIZE 8
#include "softmmu_header.h"
#endif
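/* explanatory note (not original text): each DATA_SIZE inclusion of
   softmmu_header.h above stamps out one family of _code accessors -
   ldub_code()/ldsb_code(), lduw_code()/ldsw_code(), ldl_code() and
   ldq_code() - which the translators use to fetch guest code through
   the TLB (see the ldub_code() call in get_phys_addr_code() below). */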
#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
return addr;
}
#else
# ifdef VBOX
target_ulong remR3PhysGetPhysicalAddressCode(CPUState *env, target_ulong addr, CPUTLBEntry *pTLBEntry);
# endif
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
is the offset relative to phys_ram_base */
/* XXX: i386 target specific */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    int is_user, index, pd;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
    is_user = ((env->hflags & HF_CPL_MASK) == 3);
#elif defined (TARGET_PPC)
    is_user = msr_pr;
#elif defined (TARGET_SPARC)
    is_user = (env->psrs == 0);
#else
#error "Unimplemented !"
#endif
    if (__builtin_expect(env->tlb_read[is_user][index].address !=
                         (addr & TARGET_PAGE_MASK), 0)) {
        ldub_code(addr);
    }
    pd = env->tlb_read[is_user][index].address & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM) {
#ifdef VBOX
        /* deal with non-MMIO access handlers. */
        return remR3PhysGetPhysicalAddressCode(env, addr, &env->tlb_read[is_user][index]);
#else
        cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x%08lx\n", addr);
#endif
    }
#ifdef VBOX
    return addr + env->tlb_read[is_user][index].addend - (uintptr_t)phys_ram_base;
#else
    return addr + env->tlb_read[is_user][index].addend - (unsigned long)phys_ram_base;
#endif
}
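/* usage sketch (illustrative, not original text): the returned offset is
   what the physical TB hash is keyed on, and adding phys_ram_base turns
   it back into a host pointer:

       target_ulong phys_pc = get_phys_addr_code(env, pc);
       unsigned int h = tb_phys_hash_func(phys_pc);
       TranslationBlock *tb = tb_phys_hash[h];
*/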
#endif