/* exec.c, revision e644c861dcf12d54300a0f2f08ea1fa3cabef082 */
/*
* virtual page mapping and translated block handling
*
* Copyright (c) 2003 Fabrice Bellard
*
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include "cpu.h"
#include "exec-all.h"
#ifdef VBOX
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK
/* number of write accesses to a code page after which its SMC
   code bitmap is built (see build_page_bitmap) */
#define SMC_BITMAP_USE_THRESHOLD 10
#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
#if !defined(VBOX)
int phys_ram_fd;
#endif /* !VBOX */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
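
/* The 'code_write_count' / 'code_bitmap' pair supports the self modifying
   code optimization: once a page has seen SMC_BITMAP_USE_THRESHOLD write
   accesses, a bitmap with one bit per guest byte records which bytes of the
   page are covered by translated code, so later writes only force a TB
   invalidation when they actually overlap translated code. The following is
   a minimal illustrative sketch of that overlap test (hypothetical helper
   name, assuming the one-bit-per-byte layout; not the original code): */
static inline int example_code_bitmap_hit(const uint8_t *code_bitmap,
                                          unsigned int offset, unsigned int len)
{
    /* 'len' <= 8 and 'offset' is a multiple of 'len', so all tested bits
       fall into a single bitmap byte after the shift */
    unsigned int b = code_bitmap[offset >> 3] >> (offset & 7);
    return (b & ((1u << len) - 1)) != 0;
}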
typedef struct PhysPageDesc {
/* offset in host memory of the page + io_index in the low 12 bits */
unsigned long phys_offset;
} PhysPageDesc;
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;
#define L2_BITS 10
static void io_mem_init(void);
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
/* XXX: for system emulation, it could just be an array */
#if !defined(CONFIG_USER_ONLY)
static unsigned int virt_valid_tag;
#endif
/* io memory support */
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#ifndef VBOX
/* log support */
char *logfilename = "/tmp/qemu.log";
int loglevel;
#endif
/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void page_init(void)
{
/* NOTE: we can always suppose that qemu_host_page_size >=
TARGET_PAGE_SIZE */
#ifdef _WIN32
{
}
#else
{
start = (unsigned long)code_gen_buffer;
}
#endif
if (qemu_host_page_size == 0)
    qemu_host_page_size = qemu_real_host_page_size;
qemu_host_page_bits = 0;
#if !defined(CONFIG_USER_ONLY)
virt_valid_tag = 1;
#endif
}
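
/* Much of page_init() is elided above. As an illustration, the host page
   parameters it fills in would typically be derived as below on a POSIX
   host, assuming getpagesize() as the source (the _WIN32 path would query
   GetSystemInfo() instead); a sketch, not the original code: */
#ifndef _WIN32
static void example_host_page_init(void)
{
    unsigned long size, bits;

    qemu_real_host_page_size = (unsigned long)getpagesize();
    size = qemu_real_host_page_size;
    /* the host page size must cover at least one target page */
    if (size < TARGET_PAGE_SIZE)
        size = TARGET_PAGE_SIZE;
    /* number of bits needed to address a byte within a host page */
    bits = 0;
    while ((1ul << bits) < size)
        bits++;

    qemu_host_page_size = size;
    qemu_host_page_bits = bits;
    qemu_host_page_mask = ~(size - 1);
}
#endif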
{
p = *lp;
if (!p) {
/* allocate if not found */
*lp = p;
}
}
{
PageDesc *p;
if (!p)
return 0;
}
{
PhysPageDesc **lp, *p;
p = *lp;
if (!p) {
/* allocate if not found */
*lp = p;
}
}
{
PhysPageDesc *p;
if (!p)
return 0;
#ifdef VBOX
return p;
#else
#endif
}
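
/* The page_find()/phys_page_find() helpers above (bodies elided) walk a
   two level table: the upper bits of the linear page index select an L1
   slot that points to an array of (1 << L2_BITS) descriptors. A minimal
   self contained sketch of the lookup over a caller supplied L1 table
   (hypothetical name, not the original l1_map walker): */
static PageDesc *example_page_find(PageDesc **l1_table, unsigned long vaddr)
{
    unsigned long index = vaddr >> TARGET_PAGE_BITS;     /* linear page index */
    PageDesc *l2_table = l1_table[index >> L2_BITS];     /* high bits: L1 slot */
    if (!l2_table)
        return NULL;                                     /* L2 table not allocated */
    return &l2_table[index & ((1 << L2_BITS) - 1)];      /* low bits: L2 slot */
}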
#if !defined(CONFIG_USER_ONLY)
{
VirtPageDesc **lp, *p;
/* XXX: should not truncate for 64 bit addresses */
#if TARGET_LONG_BITS > 32
#endif
p = *lp;
if (!p) {
/* allocate if not found */
*lp = p;
}
}
{
VirtPageDesc *p;
if (!p)
return 0;
}
static void virt_page_flush(void)
{
int i, j;
VirtPageDesc *p;
if (virt_valid_tag == 0) {
virt_valid_tag = 1;
for(i = 0; i < L1_SIZE; i++) {
p = l1_virt_map[i];
if (p) {
for(j = 0; j < L2_SIZE; j++)
p[j].valid_tag = 0;
}
}
}
}
#else
static void virt_page_flush(void)
{
}
#endif
void cpu_exec_init(void)
{
if (!code_gen_ptr) {
page_init();
io_mem_init();
}
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
if (p->code_bitmap) {
qemu_free(p->code_bitmap);
p->code_bitmap = NULL;
}
p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
int i, j;
PageDesc *p;
for(i = 0; i < L1_SIZE; i++) {
p = l1_map[i];
if (p) {
for(j = 0; j < L2_SIZE; j++) {
p++;
}
}
}
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
{
#if defined(DEBUG_FLUSH)
printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
#endif
nb_tbs = 0;
/* XXX: flush processor icache at this point if cache flush is
expensive */
}
#ifdef DEBUG_TB_CHECK
static void tb_invalidate_check(unsigned long address)
{
int i;
for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
}
}
}
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
}
}
}
}
{
unsigned int n1;
/* suppress any remaining jumps to this TB */
for(;;) {
if (n1 == 2)
break;
}
/* check end of list */
}
}
#endif
/* invalidate one TB */
int next_offset)
{
for(;;) {
break;
}
}
}
{
unsigned int n1;
for(;;) {
break;
}
}
}
{
unsigned int n1;
if (tb1) {
/* find tb(n) in circular list */
for(;;) {
break;
if (n1 == 2) {
} else {
}
}
/* now we can suppress tb(n) from the list */
}
}
/* reset the jump entry 'n' of a TB so that it is not chained to
another TB */
{
}
{
unsigned int h, n1;
tb_invalidated_flag = 1;
/* remove the TB from the hash list */
for(;;) {
/* NOTE: the TB is not necessarily linked in the hash; if it is
   not found there, it simply is not currently in use */
return;
break;
}
}
/* suppress this TB from the two jump lists */
tb_jmp_remove(tb, 0);
/* suppress any remaining jumps to this TB */
for(;;) {
if (n1 == 2)
break;
}
}
#ifdef VBOX
{
#if 1
#else
flags);
if(tb)
{
#ifdef DEBUG
#endif
//Note: this will leak TBs, but the whole cache will be flushed
// when it happens too often
}
#endif
}
# ifdef VBOX_STRICT
/**
* Gets the page offset.
*/
{
return p ? p->phys_offset : 0;
}
# endif /* VBOX_STRICT */
#endif /* VBOX */
{
PageDesc *p;
unsigned int h;
/* remove the TB from the hash list */
h = tb_phys_hash_func(phys_pc);
/* remove the TB from the page list */
}
}
}
{
}
} else {
*tab++ = 0xff;
start += 8;
}
}
}
}
static void build_page_bitmap(PageDesc *p)
{
if (!p->code_bitmap)
return;
n = (long)tb & 3;
/* NOTE: this is subtle as a TB may span two physical pages */
if (n == 0) {
/* NOTE: tb_end may be after the end of the page, but
it is not a problem */
if (tb_end > TARGET_PAGE_SIZE)
} else {
tb_start = 0;
}
}
}
#ifdef TARGET_HAS_PRECISE_SMC
int cflags)
{
int code_gen_size;
if (!tb) {
/* flush must be done */
/* cannot fail at this point */
}
code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
/* check next page if needed */
phys_page2 = -1;
}
}
#endif
/* invalidate all TBs which intersect with the target physical page
starting in range [start;end[. NOTE: start and end must refer to
the same physical page. 'is_cpu_write_access' should be true if called
from a real cpu write access: the virtual CPU will exit the current
TB if code is modified inside this TB. */
int is_cpu_write_access)
{
PageDesc *p;
if (!p)
return;
if (!p->code_bitmap &&
++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
/* build code bitmap */
}
/* we remove all the TBs in the range [start, end[ */
/* XXX: see if in some cases it could be faster to invalidate all the code */
current_tb_modified = 0;
current_pc = 0; /* avoid warning */
current_cs_base = 0; /* avoid warning */
current_flags = 0; /* avoid warning */
n = (long)tb & 3;
/* NOTE: this is subtle as a TB may span two physical pages */
if (n == 0) {
/* NOTE: tb_end may be after the end of the page, but
it is not a problem */
} else {
}
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_not_found) {
current_tb_not_found = 0;
current_tb = NULL;
if (env->mem_write_pc) {
/* now we have a real cpu fault */
}
}
if (current_tb == tb &&
/* If we are modifying the current TB, we must stop
its execution. We could be more precise by checking
that the modification is after the current PC, but it
would require a specialized function to partially
restore the CPU state */
current_tb_modified = 1;
#if defined(TARGET_I386)
#else
#endif
}
#endif /* TARGET_HAS_PRECISE_SMC */
}
}
#if !defined(CONFIG_USER_ONLY)
/* if no code remaining, no need to continue to use slow writes */
if (!p->first_tb) {
if (is_cpu_write_access) {
}
}
#endif
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
}
#endif
}
/* len must be <= 8 and start must be a multiple of len */
{
PageDesc *p;
int offset, b;
#if 0
if (1) {
if (loglevel) {
}
}
#endif
if (!p)
return;
if (p->code_bitmap) {
goto do_invalidate;
} else {
}
}
#if !defined(CONFIG_SOFTMMU)
{
int n, current_flags, current_tb_modified;
PageDesc *p;
#ifdef TARGET_HAS_PRECISE_SMC
#endif
addr &= TARGET_PAGE_MASK;
if (!p)
return;
current_tb_modified = 0;
current_tb = NULL;
current_pc = 0; /* avoid warning */
current_cs_base = 0; /* avoid warning */
current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
}
#endif
n = (long)tb & 3;
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb == tb &&
/* If we are modifying the current TB, we must stop
its execution. We could be more precise by checking
that the modification is after the current PC, but it
would require a specialized function to partially
restore the CPU state */
current_tb_modified = 1;
#if defined(TARGET_I386)
#else
#endif
}
#endif /* TARGET_HAS_PRECISE_SMC */
}
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
}
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
unsigned int n, unsigned int page_addr)
{
PageDesc *p;
last_first_tb = p->first_tb;
#if defined(TARGET_HAS_SMC) || 1
#if defined(CONFIG_USER_ONLY)
if (p->flags & PAGE_WRITE) {
int prot;
/* force the host page to be non-writable (writes will take a
   page fault + mprotect overhead) */
prot = 0;
#ifdef DEBUG_TB_INVALIDATE
printf("protecting code page: 0x%08lx\n",
#endif
p->flags &= ~PAGE_WRITE;
}
#else
/* if some code is already present, then the pages are already
protected. So we handle the case where only the first TB is
allocated in a physical page */
if (!last_first_tb) {
}
#endif
#endif /* TARGET_HAS_SMC */
}
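
/* In the CONFIG_USER_ONLY branch above, the host page holding translated
   guest code is made read only so that a later guest write faults and ends
   up in page_unprotect(), which invalidates the overlapping TBs before
   restoring write access. Minimal sketch of that protect/unprotect pair
   using mprotect(2); hypothetical helpers that rely on guest addresses
   being host addresses, as in user only mode (the original computes the
   protection from the flags of every guest page sharing the host page): */
#ifndef _WIN32
static int example_protect_code_page(unsigned long guest_addr)
{
    /* round down to the host page containing the guest page */
    unsigned long host_start = guest_addr & qemu_host_page_mask;
    /* writes now fault; the SIGSEGV handler ends up in page_unprotect() */
    return mprotect((void *)host_start, qemu_host_page_size,
                    PROT_READ | PROT_EXEC);
}

static int example_unprotect_code_page(unsigned long guest_addr)
{
    unsigned long host_start = guest_addr & qemu_host_page_mask;
    /* restore write access once the overlapping TBs have been invalidated */
    return mprotect((void *)host_start, qemu_host_page_size,
                    PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif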
/* Allocate a new translation block. Flush the translation buffer if
too many translation blocks or too much generated code. */
{
if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
return NULL;
return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
(-1) to indicate that only one page contains the TB. */
{
unsigned int h;
/* add in the physical hash table */
h = tb_phys_hash_func(phys_pc);
ptb = &tb_phys_hash[h];
/* add in the page list */
if (phys_page2 != -1)
else
#ifdef DEBUG_TB_CHECK
#endif
}
/* link the tb with the other TBs */
{
#if !defined(CONFIG_USER_ONLY)
{
/* save the code memory mappings (needed to invalidate the code) */
#ifdef DEBUG_TLB_CHECK
printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
}
#endif
#if !defined(CONFIG_SOFTMMU)
#endif
}
addr += TARGET_PAGE_SIZE;
#ifdef DEBUG_TLB_CHECK
printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
}
#endif
#if !defined(CONFIG_SOFTMMU)
#endif
}
}
}
#endif
#ifdef USE_CODE_COPY
#endif
/* init original jump addresses */
tb_reset_jump(tb, 0);
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
tb[1].tc_ptr. Return NULL if not found */
{
unsigned long v;
if (nb_tbs <= 0)
return NULL;
if (tc_ptr < (unsigned long)code_gen_buffer ||
tc_ptr >= (unsigned long)code_gen_ptr)
return NULL;
/* binary search (cf Knuth) */
m_min = 0;
if (v == tc_ptr)
return tb;
else if (tc_ptr < v) {
m_max = m - 1;
} else {
m_min = m + 1;
}
}
}
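
/* The (elided) function above maps a host code pointer back to the TB that
   generated it: the TB array is ordered by tc_ptr because code is emitted
   sequentially into code_gen_buffer, so a binary search applies. A self
   contained sketch of that search over a caller supplied array
   (hypothetical signature; the original works on the global TB array): */
static TranslationBlock *example_tb_find_pc(TranslationBlock *tb_array,
                                            int count, unsigned long tc_ptr)
{
    int lo = 0, hi = count - 1;

    while (lo <= hi) {
        int m = (lo + hi) >> 1;
        unsigned long v = (unsigned long)tb_array[m].tc_ptr;
        if (v == tc_ptr)
            return &tb_array[m];
        if (tc_ptr < v)
            hi = m - 1;
        else
            lo = m + 1;
    }
    /* no exact hit: the TB containing tc_ptr is the last one starting
       below it, i.e. index 'hi' (the caller already checked that tc_ptr
       lies inside the generated code buffer) */
    return hi >= 0 ? &tb_array[hi] : NULL;
}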
{
unsigned int n1;
/* find head of list */
for(;;) {
if (n1 == 2)
break;
}
/* we are now sure that tb jumps to tb1 */
/* remove tb from the jmp_first list */
for(;;) {
break;
}
/* suppress the jump to next tb in generated code */
tb_reset_jump(tb, n);
/* suppress jumps in the tb on which we could have jumped */
}
}
{
}
{
}
#endif
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
breakpoint is reached */
{
int i;
for(i = 0; i < env->nb_breakpoints; i++) {
return 0;
}
return -1;
return 0;
#else
return -1;
#endif
}
/* remove a breakpoint */
{
int i;
for(i = 0; i < env->nb_breakpoints; i++) {
goto found;
}
return -1;
env->nb_breakpoints--;
return 0;
#else
return -1;
#endif
}
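
/* The breakpoint helpers above keep the breakpoints in a small array in the
   CPU state (env->nb_breakpoints and a matching address array) and flush
   the translated code for the affected pc so the next translation inserts
   the EXCP_DEBUG check. Self contained sketch of the array handling only
   (hypothetical standalone storage; the TB flush step is just a comment): */
#define EXAMPLE_MAX_BREAKPOINTS 32
static unsigned long example_breakpoints[EXAMPLE_MAX_BREAKPOINTS];
static int example_nb_breakpoints;

static int example_breakpoint_insert(unsigned long pc)
{
    int i;
    for (i = 0; i < example_nb_breakpoints; i++) {
        if (example_breakpoints[i] == pc)
            return 0;                          /* already set */
    }
    if (example_nb_breakpoints >= EXAMPLE_MAX_BREAKPOINTS)
        return -1;                             /* table full */
    example_breakpoints[example_nb_breakpoints++] = pc;
    /* the real code now invalidates the TB(s) containing 'pc' */
    return 0;
}

static int example_breakpoint_remove(unsigned long pc)
{
    int i;
    for (i = 0; i < example_nb_breakpoints; i++) {
        if (example_breakpoints[i] == pc) {
            /* keep the array dense by moving the last entry down */
            example_breakpoints[i] =
                example_breakpoints[--example_nb_breakpoints];
            return 0;
        }
    }
    return -1;                                 /* no such breakpoint */
}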
/* enable or disable single step mode. EXCP_DEBUG is returned by the
CPU loop after each instruction */
{
/* must flush all the translated code to avoid inconsistencies */
/* XXX: only flush what is necessary */
}
#endif
}
#ifndef VBOX
/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
if (!logfile) {
_exit(1);
}
#if !defined(CONFIG_SOFTMMU)
/* must avoid mmap() usage of glibc by setting a buffer "by hand" */
{
}
#else
#endif
}
}
void cpu_set_log_filename(const char *filename)
{
}
#endif
/* mask must never be zero, except for A20 change call */
{
static int interrupt_lock;
#if defined(VBOX)
#else /* VBOX */
#endif /* VBOX */
/* if the cpu is currently executing code, we must unlink it and
   all the potentially executing TBs */
interrupt_lock = 0;
}
}
{
#if defined(VBOX)
/*
* Note: the current implementation can be executed by another thread without problems; make sure this remains true
* for future changes!
*/
#else /* !VBOX */
#endif /* !VBOX */
}
#ifndef VBOX
CPULogItem cpu_log_items[] = {
{ CPU_LOG_TB_OUT_ASM, "out_asm",
"show generated host assembly code for each compiled TB" },
{ CPU_LOG_TB_IN_ASM, "in_asm",
"show target assembly code for each compiled TB" },
{ CPU_LOG_TB_OP, "op",
"show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
{ CPU_LOG_TB_OP_OPT, "op_opt",
"show micro ops after optimization for each compiled TB" },
#endif
{ CPU_LOG_INT, "int",
"show interrupts/exceptions in short format" },
{ CPU_LOG_EXEC, "exec",
"show trace before each executed TB (lots of logs)" },
{ CPU_LOG_TB_CPU, "cpu",
"show CPU state before bloc translation" },
#ifdef TARGET_I386
{ CPU_LOG_PCALL, "pcall",
"show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
{ CPU_LOG_IOPORT, "ioport",
"show all i/o ports accesses" },
#endif
};
{
return 0;
}
/* takes a comma-separated list of log masks. Returns 0 on error. */
int cpu_str_to_log_mask(const char *str)
{
int mask;
const char *p, *p1;
p = str;
mask = 0;
for(;;) {
if (!p1)
}
} else {
goto found;
}
return 0;
}
if (*p1 != ',')
break;
p = p1 + 1;
}
return mask;
}
#endif /* !VBOX */
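
/* cpu_str_to_log_mask() above walks a comma separated list, matching each
   item against the names in cpu_log_items[] and OR-ing the corresponding
   masks; the matching details are elided. Self contained sketch of the same
   parsing pattern (hypothetical item table and mask values): */
static int example_str_to_log_mask(const char *str)
{
    static const struct { const char *name; int mask; } items[] = {
        { "in_asm",  1 << 0 },
        { "out_asm", 1 << 1 },
        { "exec",    1 << 2 },
    };
    int mask = 0;
    const char *p = str;

    for (;;) {
        const char *p1 = strchr(p, ',');
        size_t len = p1 ? (size_t)(p1 - p) : strlen(p);
        size_t i;
        int found = 0;

        for (i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
            if (strlen(items[i].name) == len &&
                memcmp(items[i].name, p, len) == 0) {
                mask |= items[i].mask;
                found = 1;
                break;
            }
        }
        if (!found)
            return 0;            /* unknown item: report an error */
        if (!p1)
            break;               /* that was the last item */
        p = p1 + 1;
    }
    return mask;
}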
#if !defined(VBOX) /* VBOX: we have our own routine. */
{
#ifdef TARGET_I386
#else
#endif
abort();
}
#endif /* !VBOX */
#if !defined(CONFIG_USER_ONLY)
/* NOTE: if flush_global is true, also flush global entries (not
implemented yet) */
{
int i;
#if defined(DEBUG_TLB)
printf("tlb_flush:\n");
#endif
/* must reset current TB so that interrupts cannot modify the
links while we are modifying them */
for(i = 0; i < CPU_TLB_SIZE; i++) {
}
#if !defined(CONFIG_SOFTMMU)
/* inform raw mode about TLB flush */
#endif
}
{
}
{
int i, n;
PageDesc *p;
#if defined(DEBUG_TLB)
#endif
/* must reset current TB so that interrupts cannot modify the
links while we are modifying them */
addr &= TARGET_PAGE_MASK;
/* remove from the virtual pc hash table all the TB at this
virtual address */
if (p) {
/* we remove all the links to the TBs in this virtual page */
n = (long)tb & 3;
}
}
}
}
#if !defined(CONFIG_SOFTMMU)
if (addr < MMAP_AREA_END)
/* inform raw mode about TLB page flush */
#endif
}
{
(TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
}
}
/* update the TLBs so that writes to code in the virtual page 'addr'
can be detected */
{
int i;
addr &= TARGET_PAGE_MASK;
#if !defined(CONFIG_SOFTMMU)
/* NOTE: as we generated the code for this page, it is already at
least readable */
if (addr < MMAP_AREA_END)
#endif
#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
#endif
}
unsigned long phys_addr)
{
}
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
{
int i;
#ifdef VBOX
#else
phys_addr += (long)phys_ram_base;
#endif
}
{
unsigned long addr;
}
}
}
{
int i;
if (length == 0)
return;
/* we modify the TLB cache so that the dirty bit will be set again
when accessing the range */
#ifdef VBOX
#else
#endif
for(i = 0; i < CPU_TLB_SIZE; i++)
for(i = 0; i < CPU_TLB_SIZE; i++)
#if !defined(CONFIG_SOFTMMU) && !defined(VBOX)
/* XXX: this is expensive */
{
VirtPageDesc *p;
int j;
for(i = 0; i < L1_SIZE; i++) {
p = l1_virt_map[i];
if (p) {
for(j = 0; j < L2_SIZE; j++) {
if (p->valid_tag == virt_valid_tag &&
(p->prot & PROT_WRITE)) {
if (addr < MMAP_AREA_END) {
p->prot & ~PROT_WRITE);
}
}
addr += TARGET_PAGE_SIZE;
p++;
}
}
}
}
#endif
}
unsigned long start)
{
unsigned long addr;
}
}
}
/* update the TLB corresponding to virtual page vaddr and phys addr
addr so that it is no longer dirty */
{
int i;
#ifdef VBOX
{
Log(("phys_ram_dirty exceeded at address %VGp, ignoring\n",
return;
}
#else
#endif
addr &= TARGET_PAGE_MASK;
}
/* add a new TLB entry. At most one entry for a given virtual address
is permitted. Return 0 if OK or 2 if the page could not be mapped
(can only happen in non SOFTMMU mode for I/O pages or pages
conflicting with the host address space). */
int is_user, int is_softmmu)
{
PhysPageDesc *p;
unsigned long pd;
unsigned int index;
unsigned long addend;
int ret;
if (!p) {
} else {
pd = p->phys_offset;
/* NOTE: we also allocate the page at this stage */
}
}
#if defined(DEBUG_TLB)
printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
#endif
ret = 0;
#if !defined(CONFIG_SOFTMMU)
if (is_softmmu)
#endif
{
/* IO memory case */
} else {
/* standard memory */
#ifdef VBOX
#else
#endif
}
} else {
}
if (prot & PAGE_WRITE) {
/* ROM: access is ignored (same as unassigned) */
} else
/* XXX: the PowerPC code seems not ready to handle
self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
if (first_tb) {
/* if code is present, we use a specific memory
handler. It works only for physical memory access */
} else
#endif
} else {
}
} else {
}
#ifdef VBOX
/* inform raw mode about TLB page change */
#endif
}
#if !defined(CONFIG_SOFTMMU)
else {
/* IO access: no mapping is done as it will be handled by the
soft MMU */
ret = 2;
} else {
void *map_addr;
if (vaddr >= MMAP_AREA_END) {
ret = 2;
} else {
if (prot & PROT_WRITE) {
#if defined(TARGET_HAS_SMC) || 1
first_tb ||
#endif
/* ROM: we do as if code was inside */
/* if code is present, we only map as read only and save the
original mapping */
prot &= ~PAGE_WRITE;
}
}
if (map_addr == MAP_FAILED) {
}
}
}
}
#endif
return ret;
}
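
/* Throughout tlb_set_page() and the physical access paths below, the
   'phys_offset' value from PhysPageDesc does double duty: a non zero low
   part (below TARGET_PAGE_MASK) selects an io_mem handler slot, while a
   clear low part means ordinary RAM whose page offset locates it inside the
   guest RAM block. Minimal sketch of that decoding (hypothetical helper
   names; the real code also has dedicated slots for ROM and not-dirty RAM): */
static inline int example_phys_offset_is_io(unsigned long pd)
{
    return (pd & ~TARGET_PAGE_MASK) != 0;
}

static inline int example_phys_offset_io_index(unsigned long pd)
{
    return (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
}

static inline unsigned long example_phys_offset_ram_addr(unsigned long pd,
                                                         unsigned long addr,
                                                         unsigned long ram_base)
{
    /* page base inside guest RAM plus the offset within the page */
    return ram_base + (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
}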
/* called from signal handler: invalidate the code and unprotect the
page. Return TRUE if the fault was successfully handled. */
{
#if !defined(CONFIG_SOFTMMU)
#if defined(DEBUG_TLB)
#endif
addr &= TARGET_PAGE_MASK;
/* if it is not mapped, no need to worry here */
if (addr >= MMAP_AREA_END)
return 0;
if (!vp)
return 0;
/* NOTE: in this case, 'valid_tag' is _not_ tested as it
   validates only the code TLB */
return 0;
return 0;
#if defined(DEBUG_TLB)
printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
#endif
/* set the dirty bit */
/* flush the code inside */
return 1;
addr &= TARGET_PAGE_MASK;
/* if it is not mapped, no need to worry here */
if (addr >= MMAP_AREA_END)
return 0;
return 1;
#else
return 0;
#endif
}
#else
{
}
{
}
int is_user, int is_softmmu)
{
return 0;
}
#ifndef VBOX
/* dump memory mappings */
{
PageDesc *p;
fprintf(f, "%-8s %-8s %-8s %s\n",
"start", "end", "size", "prot");
start = -1;
end = -1;
prot = 0;
for(i = 0; i <= L1_SIZE; i++) {
if (i < L1_SIZE)
p = l1_map[i];
else
p = NULL;
for(j = 0;j < L2_SIZE; j++) {
if (!p)
prot1 = 0;
else
if (start != -1) {
fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
}
if (prot1 != 0)
else
start = -1;
}
if (!p)
break;
}
}
}
#endif /* !VBOX */
int page_get_flags(unsigned long address)
{
PageDesc *p;
if (!p)
return 0;
return p->flags;
}
/* modify the flags of a page and invalidate the code if
necessary. The flag PAGE_WRITE_ORG is set automatically
depending on PAGE_WRITE */
{
PageDesc *p;
unsigned long addr;
if (flags & PAGE_WRITE)
flags |= PAGE_WRITE_ORG;
#if defined(VBOX)
AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
#endif
/* if the write protection is set, then we invalidate the code
inside */
if (!(p->flags & PAGE_WRITE) &&
(flags & PAGE_WRITE) &&
p->first_tb) {
}
}
}
/* called from signal handler: invalidate the code and unprotect the
page. Return TRUE if the fault was successfully handled. */
{
if (!p1)
return 0;
p = p1;
prot = 0;
p++;
}
/* if the page was really writable, then we change its
protection back to writable */
if (prot & PAGE_WRITE_ORG) {
/* and since the content will be modified, we must invalidate
the corresponding translated code. */
#ifdef DEBUG_TB_CHECK
#endif
return 1;
}
}
return 0;
}
/* call this function when system calls directly modify a memory area */
{
}
}
{
}
#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
io memory page */
unsigned long size,
unsigned long phys_offset)
{
PhysPageDesc *p;
p->phys_offset = phys_offset;
#ifdef VBOX
#else
#endif
}
}
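
/* cpu_register_physical_memory() above records, for every target page in
   [start_addr, start_addr + size[, the phys_offset that later drives the
   RAM/IO decision in tlb_set_page() and the physical access paths below.
   Minimal sketch of the per page loop over a flat descriptor array
   (hypothetical helper; the original walks the two level PhysPageDesc
   table, and also advances the offset for ROM pages): */
static void example_register_physical_memory(PhysPageDesc *pages,
                                             unsigned long start_addr,
                                             unsigned long size,
                                             unsigned long phys_offset)
{
    unsigned long addr, end_addr = start_addr + size;

    for (addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        PhysPageDesc *p = &pages[addr >> TARGET_PAGE_BITS];
        p->phys_offset = phys_offset;
        /* RAM pages advance the backing offset; an IO handler value is
           replicated unchanged over the whole range */
        if ((phys_offset & ~TARGET_PAGE_MASK) == 0)
            phys_offset += TARGET_PAGE_SIZE;
    }
}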
{
return 0;
}
{
}
};
};
/* self modifying code support in soft mmu mode: writes to a page
   containing code end up in these functions */
{
unsigned long phys_addr;
#ifdef VBOX
#else
#endif
#if !defined(CONFIG_USER_ONLY)
#endif
}
{
unsigned long phys_addr;
#ifdef VBOX
#else
#endif
#if !defined(CONFIG_USER_ONLY)
#endif
}
{
unsigned long phys_addr;
#ifdef VBOX
#else
#endif
#if !defined(CONFIG_USER_ONLY)
#endif
}
NULL, /* never used */
NULL, /* never used */
NULL, /* never used */
};
};
{
}
{
}
{
}
};
static void io_mem_init(void)
{
cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
#ifdef VBOX
cpu_register_io_memory(IO_MEM_RAM_MISSING >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
io_mem_nb = 6;
#else
io_mem_nb = 5;
#endif
#if !defined(VBOX) /* VBOX: we do this later when the RAM is allocated. */
/* alloc dirty bits array */
#endif /* !VBOX */
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
void *opaque)
{
int i;
if (io_index <= 0) {
if (io_index >= IO_MEM_NB_ENTRIES)
return -1;
} else {
if (io_index >= IO_MEM_NB_ENTRIES)
return -1;
}
for(i = 0;i < 3; i++) {
}
return io_index << IO_MEM_SHIFT;
}
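
/* Typical usage: a device registers three read and three write callbacks
   (byte, word, dword), then maps the returned value as the phys_offset of
   its MMIO range. Sketch under the assumption that the callback typedefs
   take (opaque, address[, value]) as in the QEMU headers of this era; the
   device, its callbacks and the mapped range are hypothetical: */
static uint32_t example_dev_read(void *opaque, target_phys_addr_t addr)
{
    (void)opaque;
    (void)addr;                  /* a real device would decode 'addr' here */
    return 0;
}

static void example_dev_write(void *opaque, target_phys_addr_t addr,
                              uint32_t val)
{
    (void)opaque;
    (void)addr;
    (void)val;                   /* a real device would update state here */
}

static CPUReadMemoryFunc *example_dev_read_fns[3] = {
    example_dev_read, example_dev_read, example_dev_read,
};
static CPUWriteMemoryFunc *example_dev_write_fns[3] = {
    example_dev_write, example_dev_write, example_dev_write,
};

static void example_dev_map(unsigned long base)
{
    /* io_index 0 asks for a fresh slot; the result is a phys_offset with
       the handler index in its low bits */
    int iomem = cpu_register_io_memory(0, example_dev_read_fns,
                                       example_dev_write_fns, NULL);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, iomem);
}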
{
}
{
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
{
int l, flags;
while (len > 0) {
if (l > len)
l = len;
if (!(flags & PAGE_VALID))
return;
if (is_write) {
if (!(flags & PAGE_WRITE))
return;
} else {
return;
}
len -= l;
buf += l;
addr += l;
}
}
/* never used */
{
return 0;
}
{
}
{
}
#else
{
int l, io_index;
unsigned long pd;
PhysPageDesc *p;
while (len > 0) {
if (l > len)
l = len;
if (!p) {
} else {
pd = p->phys_offset;
}
if (is_write) {
if ((pd & ~TARGET_PAGE_MASK) != 0) {
/* 32 bit write access */
l = 4;
/* 16 bit write access */
l = 2;
} else {
/* 8 bit access */
l = 1;
}
} else {
unsigned long addr1;
/* RAM case */
#ifdef VBOX
#else
#endif
/* invalidate code */
/* set dirty bit */
}
} else {
/* I/O case */
/* 32 bit read access */
l = 4;
/* 16 bit read access */
l = 2;
} else {
/* 8 bit access */
l = 1;
}
} else {
/* RAM case */
#ifdef VBOX
#else
(addr & ~TARGET_PAGE_MASK);
#endif
}
}
len -= l;
buf += l;
addr += l;
}
}
/* warning: addr must be aligned */
{
int io_index;
unsigned long pd;
PhysPageDesc *p;
if (!p) {
} else {
pd = p->phys_offset;
}
/* I/O case */
} else {
/* RAM case */
#ifdef VBOX
#else
(addr & ~TARGET_PAGE_MASK);
#endif
}
return val;
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
{
int io_index;
unsigned long pd;
PhysPageDesc *p;
if (!p) {
} else {
pd = p->phys_offset;
}
if ((pd & ~TARGET_PAGE_MASK) != 0) {
} else {
#ifdef VBOX
#else
(addr & ~TARGET_PAGE_MASK);
#endif
}
}
/* warning: addr must be aligned */
/* XXX: optimize code invalidation test */
{
int io_index;
unsigned long pd;
PhysPageDesc *p;
if (!p) {
} else {
pd = p->phys_offset;
}
if ((pd & ~TARGET_PAGE_MASK) != 0) {
} else {
unsigned long addr1;
/* RAM case */
#ifdef VBOX
#else
#endif
/* invalidate code */
/* set dirty bit */
}
}
#endif
/* virtual memory access for debug */
{
int l;
while (len > 0) {
/* if no physical page mapped, return an error */
if (phys_addr == -1)
return -1;
if (l > len)
l = len;
len -= l;
buf += l;
addr += l;
}
return 0;
}
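
/* The debug accessor above splits the virtual range at page boundaries,
   translates each page and then goes through the physical access path; the
   translation step is elided. Sketch of that loop, assuming the usual
   cpu_get_phys_page_debug() helper and the physical rw routine defined
   earlier in this file (hypothetical wrapper name): */
static int example_memory_rw_debug(CPUState *env, target_ulong addr,
                                   uint8_t *buf, int len, int is_write)
{
    while (len > 0) {
        target_ulong page = addr & TARGET_PAGE_MASK;
        target_ulong phys_addr = cpu_get_phys_page_debug(env, page);
        int l;

        /* if no physical page is mapped, return an error */
        if (phys_addr == (target_ulong)-1)
            return -1;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}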
#ifndef VBOX
void dump_exec_info(FILE *f,
{
int i, target_code_size, max_target_code_size;
target_code_size = 0;
max_target_code_size = 0;
cross_page = 0;
direct_jmp_count = 0;
direct_jmp2_count = 0;
for(i = 0; i < nb_tbs; i++) {
cross_page++;
}
}
}
/* XXX: avoid using doubles ? */
cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
}
#endif /* !VBOX */
#if !defined(CONFIG_USER_ONLY)
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS
#define SHIFT 0
#include "softmmu_template.h"
#define SHIFT 1
#include "softmmu_template.h"
#define SHIFT 2
#include "softmmu_template.h"
#define SHIFT 3
#include "softmmu_template.h"
#endif