cpu-all.h revision 12fcc878a631e75b88a82cebc92d1cd57b09c8e7
/*
* defines common to all virtual CPUs
*
* Copyright (c) 2003 Fabrice Bellard
*
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef CPU_ALL_H
#define CPU_ALL_H
#ifdef VBOX
# ifndef LOG_GROUP
# define LOG_GROUP LOG_GROUP_REM
# endif
#endif
#if defined(__arm__) || defined(__sparc__)
#define WORDS_ALIGNED
#endif
/* some important defines:
*
* WORDS_ALIGNED : if defined, the host cpu can only make word aligned
* memory accesses.
*
* WORDS_BIGENDIAN : if defined, the host cpu is big endian and
* otherwise little endian.
*
* (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
*
* TARGET_WORDS_BIGENDIAN : same for target cpu
*/
#include "bswap.h"
#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif
#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}
#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}
#endif
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#else
#define tswapl(s) tswap64(s)
#endif
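/* Illustrative only (not part of the original header): tswap32() yields the
   host-order value of a 32-bit quantity stored in target byte order; it is a
   byte swap when host and target endianness differ and a no-op otherwise. */
static inline uint32_t example_target_to_host32(uint32_t target_order_val)
{
    return tswap32(target_order_val);
}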
/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
endian ! */
typedef union {
    float64 d;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
} CPU_DoubleU;
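/* Illustrative only: CPU_DoubleU gives layout-independent access to the two
   32-bit halves of a float64, which the 64-bit float load/store helpers
   further down rely on. */
static inline uint32_t example_float64_high_word(float64 d)
{
    CPU_DoubleU u;
    u.d = d;
    return u.l.upper;   /* most significant half, whatever the host layout */
}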
/* CPU memory access without any memory or io remapping */
/*
* the generic syntax for the memory accesses is:
*
* load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
*
* store: st{type}{size}{endian}_{access_type}(ptr, val)
*
* type is:
* (empty): integer access
* f : float access
*
* sign is:
* (empty): for floats or 32 bit size
* u : unsigned
* s : signed
*
* size is:
* b: 8 bits
* w: 16 bits
* l: 32 bits
* q: 64 bits
*
* endian is:
* (empty): target cpu endianness or 8 bit access
* r : reversed target cpu endianness (not implemented yet)
* be : big endian (not implemented yet)
* le : little endian (not implemented yet)
*
* access_type is:
* raw : host memory access
* user : user mode access using soft MMU
* kernel : kernel mode access using soft MMU
*/
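/* Illustrative only: decoding a few names built from the scheme above.
   'buf' is a hypothetical host pointer and 'vaddr' a hypothetical guest
   address; the accessors themselves are defined later in this header. */
#if 0
static void example_access_naming(uint8_t *buf, target_ulong vaddr)
{
    uint16_t a = lduw_raw(buf);        /* integer, unsigned, 16 bit, target order, host memory */
    int32_t  b = ldsw_kernel(vaddr);   /* integer, signed, 16 bit, kernel-mode soft MMU */
    stl_raw(buf + 4, 0x12345678);      /* integer store, 32 bit, target order, host memory */
    stfl_raw(buf + 8, (float32)1.0);   /* float store, 32 bit */
}
#endif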
#ifdef VBOX
void remR3GrowDynRange(unsigned long physaddr);
#if 0 /*defined(RT_ARCH_AMD64) && defined(VBOX_STRICT)*/
#else
# define VBOX_CHECK_ADDR(ptr) do { } while (0)
#endif
{
}
{
}
{
}
{
}
{
}
{
}
{
}
{
}
{
}
{
}
/* float access */

static inline float32 ldfl_le_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p((uint8_t *)ptr + 4, u.l.upper);
}
#else /* !VBOX */

static inline int ldub_p(void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the kernel
   handles unaligned load/stores may give better results, but
   it is a system wide setting : bad */
#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = (uint8_t *)ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = (uint8_t *)ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = (uint8_t *)ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(void *ptr)
{
    uint8_t *p = (uint8_t *)ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}
static inline void stw_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = (uint8_t *)ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = (uint8_t *)ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = (uint8_t *)ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}
/* float access */

static inline float32 ldfl_le_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p((uint8_t *)ptr + 4, u.l.upper);
}
#else
{
}
{
}
{
}
{
}
{
}
{
}
{
}
/* float access */
{
}
{
}
{
}
{
}
#endif
#endif /* !VBOX */
#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
static inline int lduw_be_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *)ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    uint8_t *b = (uint8_t *)ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *)ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a << 32) | b);
}
static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *)ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *)ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}
/* float access */

static inline float32 ldfl_be_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}
#else
{
}
{
}
{
}
{
}
{
}
{
}
{
}
/* float access */
{
}
{
}
{
}
{
}
#endif
/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#else
#endif
/* MMU memory access macros */
#if defined(CONFIG_USER_ONLY)
/* On some host systems the guest address space is reserved on the host.
* This allows the guest address space to be offset to a convenient location.
*/
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ((target_ulong)((unsigned long)(x) - GUEST_BASE))
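/* Illustrative only: a user-mode guest load is done by turning the guest
   address into a host pointer with g2h() before the raw host access.
   'gaddr' is a hypothetical guest virtual address; ldl_raw() is defined
   further down in this header. */
#if 0
static inline uint32_t example_guest_ldl(target_ulong gaddr)
{
    return ldl_raw(g2h(gaddr));   /* guest VA -> host pointer -> 32-bit load */
}
#endif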
#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
different sizes */
#endif
#if defined(CONFIG_USER_ONLY)
/* if user mode, no other memory access functions */
#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#endif /* defined(CONFIG_USER_ONLY) */
/* page related stuff */
/* ??? These should be the larger of unsigned long and target_ulong. */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;
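/* Illustrative only: callers typically align a region to host page boundaries
   before changing its protection, along these lines. */
static inline unsigned long example_host_page_align_down(unsigned long addr)
{
    return addr & qemu_host_page_mask;   /* round down to the host page start */
}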
/* same as PROT_xxx */
#define PAGE_READ 0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC 0x0004
#define PAGE_VALID 0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
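/* Illustrative only: a typical protection check combines the bits above,
   e.g. before emulating a guest store. */
static inline int example_page_is_writable(int flags)
{
    return (flags & PAGE_VALID) && (flags & PAGE_WRITE);
}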
#define SINGLE_CPU_DEFINES
#ifdef SINGLE_CPU_DEFINES
#if defined(TARGET_I386)
#define CPUState CPUX86State
#define cpu_init cpu_x86_init
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#elif defined(TARGET_ARM)
#define CPUState CPUARMState
#define cpu_init cpu_arm_init
#define cpu_exec cpu_arm_exec
#define cpu_gen_code cpu_arm_gen_code
#elif defined(TARGET_SPARC)
#define CPUState CPUSPARCState
#define cpu_init cpu_sparc_init
#define cpu_exec cpu_sparc_exec
#define cpu_gen_code cpu_sparc_gen_code
#elif defined(TARGET_PPC)
#define CPUState CPUPPCState
#define cpu_init cpu_ppc_init
#define cpu_exec cpu_ppc_exec
#define cpu_gen_code cpu_ppc_gen_code
#elif defined(TARGET_M68K)
#define CPUState CPUM68KState
#define cpu_init cpu_m68k_init
#define cpu_exec cpu_m68k_exec
#define cpu_gen_code cpu_m68k_gen_code
#elif defined(TARGET_MIPS)
#define CPUState CPUMIPSState
#define cpu_init cpu_mips_init
#define cpu_exec cpu_mips_exec
#define cpu_gen_code cpu_mips_gen_code
#elif defined(TARGET_SH4)
#define CPUState CPUSH4State
#define cpu_init cpu_sh4_init
#define cpu_exec cpu_sh4_exec
#define cpu_gen_code cpu_sh4_gen_code
#else
#error unsupported target CPU
#endif
#endif /* SINGLE_CPU_DEFINES */
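/* Illustrative only: thanks to the aliases above, target-independent code can
   be written once; on an x86 target build the names below expand to
   CPUX86State, cpu_x86_init and cpu_x86_exec. */
#if 0 /* sketch; the per-target functions are declared in the target's cpu.h */
static int example_run_cpu(void)
{
    CPUState *env = cpu_init();   /* create and reset the virtual CPU */
    return cpu_exec(env);         /* run translated code until an exit condition */
}
#endif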
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);
extern CPUState *cpu_single_env;
extern int code_copy_enabled;
#ifdef VBOX
/** Executes a single instruction. cpu_exec() will normally return EXCP_SINGLE_INSTR. */
#define CPU_INTERRUPT_SINGLE_INSTR 0x0200
/** Executing a CPU_INTERRUPT_SINGLE_INSTR request, quit the cpu_loop. (for exceptions and suchlike) */
#define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT 0x0400
/** VM execution was interrupted by VMR3Reset, VMR3Suspend or VMR3PowerOff. */
#define CPU_INTERRUPT_RC 0x0800
/** Exit current TB to process an external interrupt request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_EXIT 0x1000
/** Exit current TB to process an external interrupt request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_HARD 0x2000
/** Exit current TB to process an external interrupt request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_TIMER 0x4000
/** Exit current TB to process an external interrupt request (also in op.c!!) */
#define CPU_INTERRUPT_EXTERNAL_DMA 0x8000
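/* Illustrative only: other VBox threads typically post one of the flags above
   with cpu_interrupt() (declared elsewhere in the full header) to force the
   recompiler out of the current translation block. */
#if 0
static inline void example_post_external_timer(CPUState *env)
{
    cpu_interrupt(env, CPU_INTERRUPT_EXTERNAL_TIMER); /* leave the current TB ASAP */
}
#endif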
#endif /* VBOX */
/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
#define CPU_LOG_TB_OUT_ASM (1 << 0)
/* define log items */
typedef struct CPULogItem {
int mask;
const char *name;
const char *help;
} CPULogItem;
extern CPULogItem cpu_log_items[];
void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);
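/* Illustrative only: a front end typically turns the user supplied item list
   into a mask and installs it along these lines. */
#if 0
static void example_enable_logging(const char *items)
{
    cpu_set_log_filename("/tmp/qemu.log");      /* where the log goes */
    cpu_set_log(cpu_str_to_log_mask(items));    /* e.g. items = "in_asm,op" */
}
#endif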
/* IO ports API */
/* NOTE: as these functions may even be used when there is an ISA
   bridge on non-x86 targets, we always define them */
#ifndef NO_CPU_IO_DEFS
#endif
/* memory API */
#ifndef VBOX
extern int phys_ram_size;
extern int phys_ram_fd;
#else /* VBOX */
extern RTGCPHYS phys_ram_size;
/** This is required for bounds checking the phys_ram_dirty accesses. */
extern uint32_t phys_ram_dirty_size;
#endif /* VBOX */
#if !defined(VBOX)
extern uint8_t *phys_ram_base;
#endif
extern uint8_t *phys_ram_dirty;
/* physical memory access */
#define IO_MEM_SHIFT 4
#if defined(VBOX)
#endif
/* acts like a ROM when read and like a device when written. As an
exception, the write memory callback gets the ram offset instead of
the physical address */
#define IO_MEM_ROMD (1)
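/* Illustrative only: a flash-like device would register its memory with the
   ROMD bit set so that reads are serviced directly from RAM while writes are
   trapped by its callbacks. 'base', 'size', 'ram_offset' and 'io_index' are
   hypothetical values obtained elsewhere. */
#if 0
static void example_register_romd(target_phys_addr_t base, unsigned long size,
                                  unsigned long ram_offset, int io_index)
{
    cpu_register_physical_memory(base, size, ram_offset | io_index | IO_MEM_ROMD);
}
#endif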
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset);
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque);
{
}
{
}
#define VGA_DIRTY_FLAG 0x01
#define CODE_DIRTY_FLAG 0x02
/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(target_ulong addr)
{
#ifdef VBOX
    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    {
        /*AssertMsgFailed(("cpu_physical_memory_is_dirty: %VGp\n", (RTGCPHYS)addr));*/
        return 0;
    }
#endif
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}
static inline int cpu_physical_memory_get_dirty(target_ulong addr,
                                                int dirty_flags)
{
#ifdef VBOX
    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    {
        /*AssertMsgFailed(("cpu_physical_memory_is_dirty: %VGp\n", (RTGCPHYS)addr));*/
        return 0xff & dirty_flags;
    }
#endif
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}
static inline void cpu_physical_memory_set_dirty(target_ulong addr)
{
#ifdef VBOX
    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    {
        /*AssertMsgFailed(("cpu_physical_memory_is_dirty: %VGp\n", (RTGCPHYS)addr));*/
        return;
    }
#endif
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}
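/* Illustrative only: a display refresh loop typically tests the VGA bit for
   each page it scans and clears it again once the page has been redrawn. */
static inline int example_page_needs_redraw(target_ulong addr)
{
    return cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG) != 0;
}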
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end,
                                     int dirty_flags);
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
/*******************************************/
/* host CPU ticks (if available) */
#if defined(__powerpc__)

static inline uint32_t get_tbl(void)
{
    uint32_t tbl;
    asm volatile("mftb %0" : "=r" (tbl));
    return tbl;
}

static inline uint32_t get_tbu(void)
{
    uint32_t tbl;
    asm volatile("mftbu %0" : "=r" (tbl));
    return tbl;
}

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t l, h, h1;
    /* NOTE: we test if wrapping has occurred */
    do {
        h = get_tbu();
        l = get_tbl();
        h1 = get_tbu();
    } while (h != h1);
    return ((int64_t)h << 32) | l;
}
#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}
#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}
#elif defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}
#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be totally wrong,
   but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif
/* profiling */
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
return cpu_get_real_ticks();
}
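/* Illustrative only: profiling code brackets an operation with
   profile_getclock() and accumulates the difference into one of the
   counters declared below. */
#if 0
static void example_profile_tlb_flush(CPUState *env)
{
    int64_t ti = profile_getclock();
    tlb_flush(env, 1);                         /* the operation being measured */
    tlb_flush_time += profile_getclock() - ti; /* accumulate elapsed host ticks */
}
#endif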
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;
#endif
#ifdef VBOX
#endif /* VBOX */
#endif /* CPU_ALL_H */