/* exec.c, revision 3d40f685fa5cdd9cb665ae3cbf5f76113dafcb99 */
/*
* virtual page mapping and translated block handling
*
* Copyright (c) 2003 Fabrice Bellard
*
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
/*
* Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
* other than GPL or LGPL is available it will apply instead, Oracle elects to use only
* the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
* a choice of LGPL license versions is made available with the language indicating
* that LGPLv2 or any later version may be used, or where a choice of which version
* of the LGPL is applied is otherwise unspecified.
*/
#include "config.h"
#ifndef VBOX
#ifdef _WIN32
#include <windows.h>
#else
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#else /* VBOX */
# include <stdlib.h>
# include <stdio.h>
#endif /* VBOX */
#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#ifndef VBOX
#endif /* !VBOX */
#include "osdep.h"
#include "kvm.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#include <signal.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define _KERNEL
#include <libutil.h>
#endif
#endif
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK
//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#endif
#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
#ifndef VBOX
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
have limited branch ranges (possibly also PPC) so place it in a
section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif
#else /* VBOX */
extern uint8_t *code_gen_prologue;
#endif /* VBOX */
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
# ifndef VBOX
int phys_ram_fd;
static int in_migration;
# endif /* !VBOX */
#endif
/* current CPU in the current thread. It is only valid inside
cpu_exec() */
/* 0 = Do not count executed instructions.
1 = Precise instruction counting.
2 = Adaptive rate instruction counting. */
int use_icount = 0;
/* Current instruction counter. While executing translated code this may
include some instructions that have not yet been executed. */
typedef struct PageDesc {
/* list of TBs intersecting this ram page */
/* in order to optimize self modifying code, we count the number
of lookups we do to a given page to use a bitmap */
unsigned int code_write_count;
#if defined(CONFIG_USER_ONLY)
unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
#else
#endif
#else
#endif
/* Size of the L2 (and L3, etc) page tables. */
#define L2_BITS 10
/* The bits remaining after N lower levels of page tables. */
#define P_L1_BITS_REM \
#define V_L1_BITS_REM \
/* Size of the L1 page table. Avoid silly small sizes. */
#if P_L1_BITS_REM < 4
#else
#define P_L1_BITS P_L1_BITS_REM
#endif
#if V_L1_BITS_REM < 4
#else
#define V_L1_BITS V_L1_BITS_REM
#endif
/* This is a multi-level map on the virtual address space.
The bottom level has pointers to PageDesc. */
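/* The comment above describes the radix ("multi-level") map that backs
   page_find()/page_find_alloc(): each level is a table of pointers and only
   the bottom level holds PageDesc entries.  A minimal two-level sketch of the
   lookup/allocate pattern follows; all sketch_*/SKETCH_* names and the
   simplified PageDesc are illustrative only, not this file's definitions. */
#if 0   /* illustrative sketch, not compiled */
#include <stdlib.h>

#define SKETCH_L1_BITS 10
#define SKETCH_L2_BITS 10
#define SKETCH_L1_SIZE (1 << SKETCH_L1_BITS)
#define SKETCH_L2_SIZE (1 << SKETCH_L2_BITS)

typedef struct sketch_PageDesc {
    void *first_tb;                  /* TBs intersecting this page */
    unsigned int code_write_count;
} sketch_PageDesc;

/* top level: fixed array of pointers to (lazily allocated) bottom tables */
static void *sketch_l1_map[SKETCH_L1_SIZE];

/* Return the descriptor for page 'index'; allocate the missing bottom-level
   table on demand when 'alloc' is non-zero, as page_find_alloc() does. */
static sketch_PageDesc *sketch_page_find_alloc(unsigned long index, int alloc)
{
    void **lp = &sketch_l1_map[(index >> SKETCH_L2_BITS) & (SKETCH_L1_SIZE - 1)];
    sketch_PageDesc *pd = *lp;

    if (pd == NULL) {
        if (!alloc)
            return NULL;
        pd = calloc(SKETCH_L2_SIZE, sizeof(*pd));   /* new bottom level */
        *lp = pd;
    }
    return pd + (index & (SKETCH_L2_SIZE - 1));
}
#endif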
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
/* offset in host memory of the page + io_index in the low bits */
} PhysPageDesc;
/* This is a multi-level map on the physical address space.
The bottom level has pointers to PhysPageDesc. */
static void *l1_phys_map[P_L1_SIZE];
static void io_mem_init(void);
/* io memory support */
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif
#ifndef VBOX
/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
#endif /* !VBOX */
int loglevel;
#ifndef VBOX
static int log_append = 0;
#endif /* !VBOX */
/* statistics */
#ifndef VBOX
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#else /* VBOX - Resettable U32 stats, see VBoxRecompiler.c. */
#endif /* VBOX */
#ifndef VBOX
#ifdef _WIN32
{
}
#else
{
page_size = getpagesize();
}
#endif
#else /* VBOX */
{
}
#endif /* VBOX */
static void page_init(void)
{
/* NOTE: we can always suppose that qemu_host_page_size >=
TARGET_PAGE_SIZE */
#ifdef VBOX
#else /* !VBOX */
#ifdef _WIN32
{
}
#else
#endif
#endif /* !VBOX */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    qemu_host_page_bits = 0;
#ifndef VBOX /* We use other means to set reserved bit on our pages */
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
{
#ifdef HAVE_KINFO_GETVMMAP
struct kinfo_vmentry *freep;
int i, cnt;
if (freep) {
mmap_lock();
for (i = 0; i < cnt; i++) {
} else {
#endif
}
}
}
mmap_unlock();
}
#else
FILE *f;
if (f) {
mmap_lock();
do {
int n;
} else {
}
}
} while (!feof(f));
fclose(f);
mmap_unlock();
}
#endif
}
#endif
#endif /* !VBOX */
}
{
void **lp;
int i;
#if defined(CONFIG_USER_ONLY)
/* We can't use qemu_malloc because it may recurse into a locked mutex. */
do { \
} while (0)
#else
do { P = qemu_mallocz(SIZE); } while (0)
#endif
/* Level 1. Always allocated. */
/* Level 2..N-1. */
void **p = *lp;
if (p == NULL) {
if (!alloc) {
return NULL;
}
*lp = p;
}
}
if (!alloc) {
return NULL;
}
}
}
{
return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
{
void **lp;
int i;
/* Level 1. Always allocated. */
/* Level 2..N-1. */
void **p = *lp;
if (p == NULL) {
if (!alloc) {
return NULL;
}
}
}
int i;
if (!alloc) {
return NULL;
}
for (i = 0; i < L2_SIZE; i++) {
}
}
}
{
return phys_page_find_alloc(index, 0);
}
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
#ifdef VBOX /* We don't need such a huge codegen buffer size, as we execute
               most of the code in raw or hwacc mode. */
#else /* !VBOX */
#endif /* !VBOX */
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#if defined(VBOX) && defined(USE_STATIC_CODE_GEN_BUFFER)
# error "VBox allocates codegen buffer dynamically"
#endif
#ifdef USE_STATIC_CODE_GEN_BUFFER
#endif
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
#else
# ifdef VBOX
    /* We cannot use phys_ram_size here, as it's 0 now;
     * it only gets initialized once the RAM registration callback
     * (REMR3NotifyPhysRamRegister()) has been called.
*/
# else /* !VBOX */
if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
/* in user mode, phys_ram_size is not meaningful */
#else
/* XXX: needs adjustments */
#endif
}
# endif /* !VBOX */
/* The code gen buffer location may have constraints depending on
the host cpu and OS */
# ifdef VBOX
if (!code_gen_buffer) {
        LogRel(("REM: failed to allocate codegen buffer %lld\n",
return;
}
# else /* !VBOX */
#if defined(__linux__)
{
int flags;
#if defined(__x86_64__)
/* Cannot map more than that */
#elif defined(__sparc_v9__)
// Map the buffer below 2G, so we can use direct calls and branches
start = (void *) 0x60000000UL;
/* Map the buffer below 32M, so we can use direct calls and branches */
start = (void *) 0x01000000UL;
/* Map the buffer so that we can use direct calls and branches. */
/* We have a +- 4GB range on the branches; leave some slop. */
}
start = (void *)0x90000000UL;
#endif
flags, -1, 0);
if (code_gen_buffer == MAP_FAILED) {
exit(1);
}
}
{
int flags;
#if defined(__x86_64__)
/* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
* 0x40000000 is free */
addr = (void *)0x40000000;
/* Cannot map more than that */
#endif
flags, -1, 0);
if (code_gen_buffer == MAP_FAILED) {
exit(1);
}
}
#else
#endif
# endif /* !VBOX */
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
#ifndef VBOX /** @todo r=bird: why are we different? */
#else
#endif
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
(in bytes) allocated to the translation buffer. Zero means default
size. */
{
cpu_gen_init();
page_init();
#if !defined(CONFIG_USER_ONLY)
io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
/* There's no guest base to take into account, so go ahead and
initialize the prologue now. */
#endif
}
#ifndef VBOX
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
{
/* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
version_id is increased. */
return 0;
}
static const VMStateDescription vmstate_cpu_common = {
.name = "cpu_common",
.version_id = 1,
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.fields = (VMStateField []) {
}
};
#endif
{
while (env) {
break;
}
return env;
}
#endif /* !VBOX */
{
int cpu_index;
#if defined(CONFIG_USER_ONLY)
#endif
cpu_index = 0;
cpu_index++;
}
#ifndef VBOX
#if defined(CONFIG_USER_ONLY)
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
#endif
#endif /* !VBOX */
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
if (p->code_bitmap) {
qemu_free(p->code_bitmap);
p->code_bitmap = NULL;
}
p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
{
int i;
return;
}
if (level == 0) {
for (i = 0; i < L2_SIZE; ++i) {
invalidate_page_bitmap(pd + i);
}
} else {
for (i = 0; i < L2_SIZE; ++i) {
}
}
}
static void page_flush_tb(void)
{
int i;
for (i = 0; i < V_L1_SIZE; i++) {
}
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
{
#ifdef VBOX
#endif
#if defined(DEBUG_FLUSH)
printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
(unsigned long)(code_gen_ptr - code_gen_buffer),
#endif
nb_tbs = 0;
}
/* XXX: flush processor icache at this point if cache flush is
expensive */
#ifdef VBOX
#endif
}
#ifdef DEBUG_TB_CHECK
{
int i;
for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
" PC=%08lx size=%04x\n",
}
}
}
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
}
}
}
}
#endif
/* invalidate one TB */
int next_offset)
{
for(;;) {
break;
}
}
}
{
unsigned int n1;
for(;;) {
break;
}
}
}
{
unsigned int n1;
if (tb1) {
/* find tb(n) in circular list */
for(;;) {
break;
if (n1 == 2) {
} else {
}
}
/* now we can suppress tb(n) from the list */
}
}
/* reset the jump entry 'n' of a TB so that it is not chained to
another TB */
{
}
{
PageDesc *p;
unsigned int h, n1;
/* remove the TB from the hash list */
h = tb_phys_hash_func(phys_pc);
/* remove the TB from the page list */
}
}
tb_invalidated_flag = 1;
/* remove the TB from the hash list */
}
/* suppress this TB from the two jump lists */
tb_jmp_remove(tb, 0);
/* suppress any remaining jumps to this TB */
for(;;) {
if (n1 == 2)
break;
}
}
#ifdef VBOX
{
# if 1
# else
flags);
if(tb)
{
# ifdef DEBUG
# endif
        // Note: this will leak TBs, but the whole cache will be flushed
        //       when this happens too often.
}
# endif
}
# ifdef VBOX_STRICT
/**
* Gets the page offset.
*/
{
return p ? p->phys_offset : 0;
}
# endif /* VBOX_STRICT */
#endif /* VBOX */
{
}
} else {
*tab++ = 0xff;
start += 8;
}
}
}
}
static void build_page_bitmap(PageDesc *p)
{
/* NOTE: this is subtle as a TB may span two physical pages */
if (n == 0) {
/* NOTE: tb_end may be after the end of the page, but
it is not a problem */
if (tb_end > TARGET_PAGE_SIZE)
} else {
tb_start = 0;
}
}
}
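/* build_page_bitmap() above records which bytes of a guest page are covered
   by translated code, so that once a page has taken SMC_BITMAP_USE_THRESHOLD
   write faults, later writes can be checked against the bitmap and only the
   ones hitting translated bytes force an invalidation.  A simplified sketch
   of marking and testing such a bitmap; sketch_* names are illustrative. */
#if 0   /* illustrative sketch, not compiled */
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096

/* one bit per guest byte of the page */
static uint8_t sketch_code_bitmap[SKETCH_PAGE_SIZE / 8];

/* mark [start, start+len) as containing translated code */
static void sketch_bitmap_set(unsigned start, unsigned len)
{
    unsigned i;
    for (i = start; i < start + len; i++)
        sketch_code_bitmap[i >> 3] |= 1u << (i & 7);
}

/* does a write of 'len' bytes at 'offset' touch any translated code? */
static int sketch_bitmap_hit(unsigned offset, unsigned len)
{
    unsigned i;
    for (i = offset; i < offset + len; i++)
        if (sketch_code_bitmap[i >> 3] & (1u << (i & 7)))
            return 1;
    return 0;
}
#endif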
{
int code_gen_size;
if (!tb) {
/* flush must be done */
/* cannot fail at this point */
/* Don't forget to invalidate previous TB info. */
tb_invalidated_flag = 1;
}
code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
/* check next page if needed */
phys_page2 = -1;
}
return tb;
}
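/* tb_gen_code() above bumps code_gen_ptr past the freshly emitted host code
   and rounds it up to CODE_GEN_ALIGN with the usual "add alignment-1 and
   mask" idiom.  The same rounding, isolated, with hypothetical names
   (SKETCH_ALIGN stands in for CODE_GEN_ALIGN): */
#if 0   /* illustrative sketch, not compiled */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_ALIGN 16u   /* must be a power of two */

/* return p + size rounded up to the next multiple of SKETCH_ALIGN */
static uint8_t *sketch_bump(uint8_t *p, size_t size)
{
    uintptr_t v = (uintptr_t)p + size;
    v = (v + SKETCH_ALIGN - 1) & ~(uintptr_t)(SKETCH_ALIGN - 1);
    return (uint8_t *)v;
}
#endif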
/* invalidate all TBs which intersect with the target physical page
starting in range [start;end[. NOTE: start and end must refer to
the same physical page. 'is_cpu_write_access' should be true if called
from a real cpu write access: the virtual CPU will exit the current
TB if code is modified inside this TB. */
int is_cpu_write_access)
{
PageDesc *p;
int n;
#ifdef TARGET_HAS_PRECISE_SMC
int current_tb_modified = 0;
target_ulong current_pc = 0;
int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */
if (!p)
return;
if (!p->code_bitmap &&
++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
/* build code bitmap */
}
/* we remove all the TBs in the range [start, end[ */
/* XXX: see if in some cases it could be faster to invalidate all the code */
/* NOTE: this is subtle as a TB may span two physical pages */
if (n == 0) {
/* NOTE: tb_end may be after the end of the page, but
it is not a problem */
} else {
}
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_not_found) {
current_tb_not_found = 0;
current_tb = NULL;
/* now we have a real cpu fault */
}
}
if (current_tb == tb &&
/* If we are modifying the current TB, we must stop
its execution. We could be more precise by checking
that the modification is after the current PC, but it
would require a specialized function to partially
restore the CPU state */
current_tb_modified = 1;
}
#endif /* TARGET_HAS_PRECISE_SMC */
/* we need to do that to handle the case where a signal
occurs while doing tb_phys_invalidate() */
if (env) {
}
if (env) {
}
}
}
#if !defined(CONFIG_USER_ONLY)
/* if no code remaining, no need to continue to use slow writes */
if (!p->first_tb) {
if (is_cpu_write_access) {
}
}
#endif
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
}
#endif
}
/* len must be <= 8 and start must be a multiple of len */
{
PageDesc *p;
int offset, b;
#if 0
if (1) {
qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
}
#endif
if (!p)
return;
if (p->code_bitmap) {
goto do_invalidate;
} else {
}
}
#if !defined(CONFIG_SOFTMMU)
{
PageDesc *p;
int n;
#ifdef TARGET_HAS_PRECISE_SMC
int current_tb_modified = 0;
target_ulong current_pc = 0;
int current_flags = 0;
#endif
addr &= TARGET_PAGE_MASK;
if (!p)
return;
#ifdef TARGET_HAS_PRECISE_SMC
}
#endif
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb == tb &&
/* If we are modifying the current TB, we must stop
its execution. We could be more precise by checking
that the modification is after the current PC, but it
would require a specialized function to partially
restore the CPU state */
current_tb_modified = 1;
}
#endif /* TARGET_HAS_PRECISE_SMC */
}
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
}
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
unsigned int n, tb_page_addr_t page_addr)
{
PageDesc *p;
last_first_tb = p->first_tb;
#if defined(TARGET_HAS_SMC) || 1
#if defined(CONFIG_USER_ONLY)
if (p->flags & PAGE_WRITE) {
int prot;
/* force the host page as non writable (writes will have a
page fault + mprotect overhead) */
prot = 0;
addr += TARGET_PAGE_SIZE) {
if (!p2)
continue;
}
#ifdef DEBUG_TB_INVALIDATE
#endif
}
#else
/* if some code is already present, then the pages are already
protected. So we handle the case where only the first TB is
allocated in a physical page */
if (!last_first_tb) {
}
#endif
#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
too many translation blocks or too much generated code. */
{
if (nb_tbs >= code_gen_max_blocks ||
return NULL;
return tb;
}
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
nb_tbs--;
}
}
/* add a new TB and link it to the physical page tables. phys_page2 is
(-1) to indicate that only one page contains the TB. */
{
unsigned int h;
/* Grab the mmap lock to stop another thread invalidating this TB
before we are done. */
mmap_lock();
/* add in the physical hash table */
h = tb_phys_hash_func(phys_pc);
ptb = &tb_phys_hash[h];
/* add in the page list */
if (phys_page2 != -1)
else
/* init original jump addresses */
tb_reset_jump(tb, 0);
#ifdef DEBUG_TB_CHECK
#endif
mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
tb[1].tc_ptr. Return NULL if not found */
{
uintptr_t v;
if (nb_tbs <= 0)
return NULL;
return NULL;
/* binary search (cf Knuth) */
m_min = 0;
if (v == tc_ptr)
return tb;
else if (tc_ptr < v) {
m_max = m - 1;
} else {
m_min = m + 1;
}
}
}
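/* tb_find_pc() above maps a host code pointer back to its TranslationBlock by
   binary searching tbs[], which is ordered by tc_ptr because host code is
   emitted linearly into code_gen_buffer.  A self-contained sketch of that
   search over a simplified array; sketch_* names are illustrative only. */
#if 0   /* illustrative sketch, not compiled */
#include <stdint.h>

typedef struct sketch_TB {
    uint8_t *tc_ptr;    /* start of this block's generated host code */
} sketch_TB;

/* blocks[0..n-1] sorted by tc_ptr (emission order).  Returns the block whose
   code starts at or before 'pc', or NULL if pc precedes every block. */
static sketch_TB *sketch_find_tb(sketch_TB *blocks, int n, uintptr_t pc)
{
    int lo = 0, hi = n - 1;

    if (n <= 0 || pc < (uintptr_t)blocks[0].tc_ptr)
        return NULL;
    while (lo <= hi) {
        int mid = (lo + hi) >> 1;
        uintptr_t v = (uintptr_t)blocks[mid].tc_ptr;

        if (v == pc)
            return &blocks[mid];
        if (pc < v)
            hi = mid - 1;
        else
            lo = mid + 1;
    }
    return &blocks[hi];   /* last block starting before pc */
}
#endif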
{
unsigned int n1;
/* find head of list */
for(;;) {
if (n1 == 2)
break;
}
    /* we are now sure that tb jumps to tb1 */
/* remove tb from the jmp_first list */
for(;;) {
break;
}
/* suppress the jump to next tb in generated code */
tb_reset_jump(tb, n);
/* suppress jumps in the tb on which we could have jumped */
}
}
{
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
{
}
#else
{
PhysPageDesc *p;
if (!p) {
} else {
pd = p->phys_offset;
}
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
{
}
{
return -ENOSYS;
}
#else
/* Add a watchpoint. */
{
/* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
#ifndef VBOX
return -EINVAL;
#else
return VERR_INVALID_PARAMETER;
#endif
}
/* keep all GDB-injected watchpoints in front */
else
if (watchpoint)
*watchpoint = wp;
return 0;
}
/* Remove a specific watchpoint. */
int flags)
{
return 0;
}
}
#ifndef VBOX
return -ENOENT;
#else
return VERR_NOT_FOUND;
#endif
}
/* Remove a specific watchpoint by reference. */
{
}
/* Remove all matching watchpoints. */
{
}
}
#endif
/* Add a breakpoint. */
{
#if defined(TARGET_HAS_ICE)
/* keep all GDB-injected breakpoints in front */
else
if (breakpoint)
*breakpoint = bp;
return 0;
#else
return -ENOSYS;
#endif
}
/* Remove a specific breakpoint. */
{
#if defined(TARGET_HAS_ICE)
return 0;
}
}
# ifndef VBOX
return -ENOENT;
# else
return VERR_NOT_FOUND;
# endif
#else
return -ENOSYS;
#endif
}
/* Remove a specific breakpoint by reference. */
{
#if defined(TARGET_HAS_ICE)
#endif
}
/* Remove all matching breakpoints. */
{
#if defined(TARGET_HAS_ICE)
}
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
CPU loop after each instruction */
{
#if defined(TARGET_HAS_ICE)
if (kvm_enabled())
else {
/* must flush all the translated code to avoid inconsistencies */
/* XXX: only flush what is necessary */
}
}
#endif
}
#ifndef VBOX
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
if (!logfile) {
_exit(1);
}
#if !defined(CONFIG_SOFTMMU)
/* must avoid mmap() usage of glibc by setting a buffer "by hand" */
{
static char logfile_buf[4096];
}
/* Win32 doesn't support line-buffering and requires size >= 2 */
#endif
log_append = 1;
}
}
}
void cpu_set_log_filename(const char *filename)
{
if (logfile) {
}
}
#endif /* !VBOX */
{
/* FIXME: TB unchaining isn't SMP safe. For now just ignore the
problem and hope the cpu will stop of its own accord. For userspace
emulation this often isn't actually as bad as it sounds. Often
signals are used primarily to interrupt blocking syscalls. */
/* if the cpu is currently executing code, we must unlink it and
all the potentially executing TB */
if (tb) {
}
}
/* mask must never be zero, except for A20 change call */
{
int old_mask;
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
#ifndef VBOX
#ifndef CONFIG_USER_ONLY
/*
* If called from iothread context, wake the target cpu in
     * case it is halted.
*/
if (!qemu_cpu_self(env)) {
return;
}
#endif
#endif /* !VBOX */
if (use_icount) {
#ifndef CONFIG_USER_ONLY
}
#endif
} else {
}
}
{
#ifdef VBOX
/*
* Note: the current implementation can be executed by another thread without problems; make sure this remains true
* for future changes!
*/
#else /* !VBOX */
#endif /* !VBOX */
}
{
}
#ifndef VBOX
const CPULogItem cpu_log_items[] = {
{ CPU_LOG_TB_OUT_ASM, "out_asm",
"show generated host assembly code for each compiled TB" },
{ CPU_LOG_TB_IN_ASM, "in_asm",
"show target assembly code for each compiled TB" },
{ CPU_LOG_TB_OP, "op",
"show micro ops for each compiled TB" },
{ CPU_LOG_TB_OP_OPT, "op_opt",
"show micro ops "
#ifdef TARGET_I386
"before eflags optimization and "
#endif
"after liveness analysis" },
{ CPU_LOG_INT, "int",
"show interrupts/exceptions in short format" },
{ CPU_LOG_EXEC, "exec",
"show trace before each executed TB (lots of logs)" },
{ CPU_LOG_TB_CPU, "cpu",
"show CPU state before block translation" },
#ifdef TARGET_I386
{ CPU_LOG_PCALL, "pcall",
"show protected mode far calls/returns/exceptions" },
{ CPU_LOG_RESET, "cpu_reset",
"show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
{ CPU_LOG_IOPORT, "ioport",
"show all i/o ports accesses" },
#endif
};
#ifndef CONFIG_USER_ONLY
{
}
}
{
if (r < 0)
return r;
}
return 0;
}
static int cpu_notify_migration_log(int enable)
{
if (r < 0)
return r;
}
return 0;
}
{
int i;
return;
}
if (level == 0) {
for (i = 0; i < L2_SIZE; ++i) {
}
}
} else {
for (i = 0; i < L2_SIZE; ++i) {
}
}
}
{
int i;
for (i = 0; i < P_L1_SIZE; ++i) {
l1_phys_map + 1);
}
}
{
}
{
}
#endif
{
return 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
const CPULogItem *item;
int mask;
const char *p, *p1;
p = str;
mask = 0;
for(;;) {
if (!p1)
}
} else {
goto found;
}
return 0;
}
if (*p1 != ',')
break;
p = p1 + 1;
}
return mask;
}
{
#ifdef TARGET_I386
#else
#endif
if (qemu_log_enabled()) {
qemu_log("qemu: fatal: ");
qemu_log("\n");
#ifdef TARGET_I386
#else
log_cpu_state(env, 0);
#endif
}
#if defined(CONFIG_USER_ONLY)
{
}
#endif
abort();
}
{
#if defined(TARGET_HAS_ICE)
#endif
/* Preserve chaining and index. */
/* Clone all break/watchpoints.
Note: Once we support ptrace with hw-debug register access, make sure
BP_CPU break/watchpoints are handled correctly on clone. */
#if defined(TARGET_HAS_ICE)
}
}
#endif
return new_env;
}
#endif /* !VBOX */
#if !defined(CONFIG_USER_ONLY)
{
unsigned int i;
/* Discard jump cache entries for any tb which might potentially
overlap the flushed page. */
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
i = tb_jmp_cache_hash_page(addr);
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
#ifdef VBOX
/* inform raw mode about TLB page flush */
#endif /* VBOX */
}
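/* tlb_flush_jmp_cache() above drops every tb_jmp_cache entry whose guest PC
   could lie in the flushed page.  The cache is direct mapped and its hash
   keeps all entries of one guest page contiguous, so a page can be cleared
   with a single memset over its slice.  A sketch with made-up sizes; the
   sketch_* names and constants are illustrative, not the real hash. */
#if 0   /* illustrative sketch, not compiled */
#include <string.h>
#include <stdint.h>

#define SKETCH_PAGE_BITS      12
#define SKETCH_JMP_CACHE_BITS 12
#define SKETCH_JMP_CACHE_SIZE (1 << SKETCH_JMP_CACHE_BITS)
#define SKETCH_JMP_PAGE_BITS  6
#define SKETCH_JMP_PAGE_SIZE  (1 << SKETCH_JMP_PAGE_BITS)

static void *sketch_jmp_cache[SKETCH_JMP_CACHE_SIZE];

/* low bits index within the page's slice, higher bits select the slice */
static unsigned sketch_jmp_hash(uint64_t pc)
{
    unsigned page_part = (unsigned)(pc >> SKETCH_PAGE_BITS) << SKETCH_JMP_PAGE_BITS;
    unsigned off_part  = (unsigned)(pc >> 2) & (SKETCH_JMP_PAGE_SIZE - 1);
    return (page_part | off_part) & (SKETCH_JMP_CACHE_SIZE - 1);
}

/* For a page-aligned 'addr', one contiguous memset clears every cached entry
   whose PC could fall inside that guest page. */
static void sketch_jmp_cache_flush_page(uint64_t addr)
{
    unsigned i = sketch_jmp_hash(addr & ~(uint64_t)((1 << SKETCH_PAGE_BITS) - 1));

    memset(&sketch_jmp_cache[i], 0, SKETCH_JMP_PAGE_SIZE * sizeof(void *));
}
#endif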
static CPUTLBEntry s_cputlb_empty_entry = {
.addr_read = -1,
.addr_write = -1,
.addr_code = -1,
.addend = -1,
};
/* NOTE: if flush_global is true, also flush global entries (not
implemented yet) */
{
int i;
#ifdef VBOX
#endif
#if defined(DEBUG_TLB)
printf("tlb_flush:\n");
#endif
/* must reset current TB so that interrupts cannot modify the
links while we are modifying them */
for(i = 0; i < CPU_TLB_SIZE; i++) {
int mmu_idx;
}
}
env->tlb_flush_mask = 0;
#ifdef VBOX
/* inform raw mode about TLB flush */
#endif /* VBOX */
}
{
(TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
(TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
(TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
}
}
{
int i;
int mmu_idx;
#if defined(DEBUG_TLB)
#endif
/* Check if we need to flush due to large pages. */
#if defined(DEBUG_TLB)
printf("tlb_flush_page: forced full flush ("
#endif
return;
}
/* must reset current TB so that interrupts cannot modify the
links while we are modifying them */
addr &= TARGET_PAGE_MASK;
}
/* update the TLBs so that writes to code in the virtual page 'addr'
can be detected */
{
#if defined(VBOX) && defined(REM_MONITOR_CODE_PAGES)
/** @todo Retest this? This function has changed... */
#endif /* VBOX */
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
tested for self modifying code */
{
}
{
#ifdef VBOX
if (start & 3)
return;
#endif /* VBOX */
}
}
}
/* Note: start and end must be within the same ram block. */
int dirty_flags)
{
int i;
if (length == 0)
return;
/* we modify the TLB cache so that the dirty bit will be set again
when accessing the range */
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
    /* Check that we don't span multiple blocks - this breaks the
address comparisons below. */
abort();
}
#else
start1 = (uintptr_t)remR3TlbGCPhys2Ptr(first_cpu, start, 1 /*fWritable*/); /** @todo page replacing (sharing or read only) may cause trouble, fix interface/whatever. */
#endif
int mmu_idx;
for(i = 0; i < CPU_TLB_SIZE; i++)
}
}
}
#ifndef VBOX
{
int ret = 0;
return ret;
}
int cpu_physical_memory_get_dirty_tracking(void)
{
return in_migration;
}
#endif /* !VBOX */
{
#ifndef VBOX
int ret;
return ret;
#else /* VBOX */
return 0;
#endif /* VBOX */
}
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
#else
#endif
{
#ifndef VBOX
void *p;
#endif
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
#else
#endif
if (!cpu_physical_memory_is_dirty(ram_addr)) {
}
}
}
/* update the TLB according to the current state of the dirty bits */
{
int i;
int mmu_idx;
for(i = 0; i < CPU_TLB_SIZE; i++)
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
#else
#endif
}
}
{
}
/* update the TLB corresponding to virtual page vaddr
so that it is no longer dirty */
{
int i;
int mmu_idx;
}
/* Our TLB does not support large pages, so remember the area covered by
large pages and trigger a full TLB flush if these are invalidated. */
{
return;
}
/* Extend the existing region to include the new page.
This is a compromise between unnecessary flushes and the cost
of maintaining a full variable size TLB. */
mask <<= 1;
}
}
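/* tlb_add_large_page() above keeps a single (addr, mask) pair describing the
   area known to contain large pages; when a new large page does not fit, the
   mask is widened one bit at a time (mask <<= 1) until old and new share the
   same masked address, trading precision for a tiny footprint.  A sketch of
   that widening; sketch_* names are illustrative only. */
#if 0   /* illustrative sketch, not compiled */
#include <stdint.h>

static uint64_t sketch_lp_addr;   /* masked base of the remembered region */
static uint64_t sketch_lp_mask;   /* 0 means "no large page seen yet"     */

static void sketch_add_large_page(uint64_t vaddr, uint64_t size)
{
    uint64_t mask = ~(size - 1);

    vaddr &= mask;
    if (sketch_lp_mask == 0) {          /* first large page: just remember it */
        sketch_lp_addr = vaddr;
        sketch_lp_mask = mask;
        return;
    }
    /* widen until the old region and the new page agree on the kept bits */
    mask &= sketch_lp_mask;
    while (((sketch_lp_addr ^ vaddr) & mask) != 0)
        mask <<= 1;
    sketch_lp_addr &= mask;
    sketch_lp_mask = mask;
}
#endif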
/* Add a new TLB entry. At most one entry for a given virtual address
is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
supplied size is only used by tlb_flush_page. */
{
PhysPageDesc *p;
unsigned int index;
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
#endif
if (size != TARGET_PAGE_SIZE) {
}
if (!p) {
} else {
pd = p->phys_offset;
}
#if defined(DEBUG_TLB)
printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d size=" TARGET_FMT_lx " pd=0x%08lx\n",
#endif
/* IO memory case (romd handled later) */
}
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
#else
/** @todo this is racing the phys_page_find call above since it may register
* a new chunk of memory... */
#endif
/* Normal RAM. */
iotlb |= IO_MEM_NOTDIRTY;
else
iotlb |= IO_MEM_ROM;
} else {
/* IO handlers are currently passed a physical address.
It would be nice to pass an offset from the base address
of that region. This would avoid having to special case RAM,
and avoid full address decoding in every device.
We can't use the high bits of pd for this because
IO_MEM_ROMD uses these as a ram address. */
if (p) {
iotlb += p->region_offset;
} else {
}
}
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
if (addend & 0x3)
{
if (addend & 0x2)
{
/* catch write */
write_mods |= TLB_MMIO;
}
else if (addend & 0x1)
{
/* catch all */
{
write_mods |= TLB_MMIO;
}
}
if ((iotlb & ~TARGET_PAGE_MASK) == 0)
}
#endif
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
/* Avoid trapping reads of pages with a write breakpoint. */
break;
}
}
}
} else {
}
} else {
}
if (prot & PAGE_WRITE) {
(pd & IO_MEM_ROMD)) {
/* Write access calls the I/O callback. */
} else {
}
} else {
}
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
if (prot & PAGE_WRITE)
#endif
#ifdef VBOX
/* inform raw mode about TLB page change */
#endif
}
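/* tlb_set_page() above fills one CPUTLBEntry: for RAM the 'addend' is chosen
   so that guest_vaddr + addend is the host pointer of the backing page, while
   I/O pages get flag bits in the low bits of the address field so the slow
   path is taken.  A sketch of the fast-path lookup such an entry enables;
   the entry layout and all sketch_* names are simplified illustrations. */
#if 0   /* illustrative sketch, not compiled */
#include <stdint.h>

#define SKETCH_PAGE_BITS 12
#define SKETCH_PAGE_MASK (~(uint64_t)((1 << SKETCH_PAGE_BITS) - 1))
#define SKETCH_TLB_BITS  8
#define SKETCH_TLB_SIZE  (1 << SKETCH_TLB_BITS)

typedef struct {
    uint64_t  addr_read;   /* guest page address, plus flag bits for MMIO etc. */
    uintptr_t addend;      /* host_ptr = guest_vaddr + addend for plain RAM    */
} sketch_TLBEntry;

static sketch_TLBEntry sketch_tlb[SKETCH_TLB_SIZE];

/* Host pointer for a read at 'vaddr', or NULL when the slow path (refill or
   MMIO dispatch) must run because the entry misses or carries flag bits. */
static void *sketch_tlb_read(uint64_t vaddr)
{
    sketch_TLBEntry *e = &sketch_tlb[(vaddr >> SKETCH_PAGE_BITS) & (SKETCH_TLB_SIZE - 1)];

    if (e->addr_read != (vaddr & SKETCH_PAGE_MASK))
        return NULL;
    return (void *)((uintptr_t)vaddr + e->addend);
}
#endif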
#else
{
}
{
}
/*
* Walks guest process memory "regions" one by one
* and calls callback function 'fn' for each region.
*/
struct walk_memory_regions_data
{
void *priv;
int prot;
};
{
if (rc != 0) {
return rc;
}
}
return 0;
}
{
int i, rc;
}
if (level == 0) {
for (i = 0; i < L2_SIZE; ++i) {
if (rc != 0) {
return rc;
}
}
}
} else {
for (i = 0; i < L2_SIZE; ++i) {
if (rc != 0) {
return rc;
}
}
}
return 0;
}
{
struct walk_memory_regions_data data;
target_ulong i;
for (i = 0; i < V_L1_SIZE; i++) {
if (rc != 0) {
return rc;
}
}
return walk_memory_regions_end(&data, 0, 0);
}
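/* walk_memory_regions() above visits the user-mode page map and calls 'fn'
   once per maximal run of pages sharing the same protection; the coalescing
   just remembers the start and prot of the current run and emits it whenever
   prot changes.  A sketch over a flat per-page flags array; sketch_* names
   are illustrative only. */
#if 0   /* illustrative sketch, not compiled */
#include <stdint.h>
#include <stddef.h>

#define SKETCH_PAGE_SHIFT 12

typedef int (*sketch_region_fn)(void *priv, uint64_t start, uint64_t end, int prot);

/* flags[i] is the protection of page i; 0 means unmapped. */
static int sketch_walk_regions(const int *flags, size_t npages,
                               sketch_region_fn fn, void *priv)
{
    uint64_t run_start = 0;
    int run_prot = 0, rc;
    size_t i;

    for (i = 0; i <= npages; i++) {
        int prot = (i < npages) ? flags[i] : 0;   /* sentinel ends the last run */

        if (prot != run_prot) {
            if (run_prot != 0) {
                rc = fn(priv, run_start, (uint64_t)i << SKETCH_PAGE_SHIFT, run_prot);
                if (rc != 0)
                    return rc;
            }
            run_start = (uint64_t)i << SKETCH_PAGE_SHIFT;
            run_prot = prot;
        }
    }
    return 0;
}
#endif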
{
return (0);
}
/* dump memory mappings */
{
(void) fprintf(f, "%-8s %-8s %-8s %s\n",
"start", "end", "size", "prot");
}
{
PageDesc *p;
if (!p)
return 0;
return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
The flag PAGE_WRITE_ORG is positioned automatically depending
on PAGE_WRITE. The mmap_lock should already be held. */
{
/* This function should never be called with addresses outside the
guest address space. If this assert fires, it probably indicates
a missing call to h2g_valid. */
#endif
if (flags & PAGE_WRITE) {
flags |= PAGE_WRITE_ORG;
}
#ifdef VBOX
AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n"));
#endif
len != 0;
/* If the write protection bit is set, then we invalidate
the code inside. */
if (!(p->flags & PAGE_WRITE) &&
(flags & PAGE_WRITE) &&
p->first_tb) {
}
}
}
{
PageDesc *p;
/* This function should never be called with addresses outside the
guest address space. If this assert fires, it probably indicates
a missing call to h2g_valid. */
#endif
if (len == 0) {
return 0;
}
/* We've wrapped around. */
return -1;
}
len != 0;
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;
return -1;
if (flags & PAGE_WRITE) {
if (!(p->flags & PAGE_WRITE_ORG))
return -1;
/* unprotect the page if it was put read-only because it
contains translated code */
if (!(p->flags & PAGE_WRITE)) {
return -1;
}
return 0;
}
}
return 0;
}
/* called from signal handler: invalidate the code and unprotect the
page. Return TRUE if the fault was successfully handled. */
{
unsigned int prot;
PageDesc *p;
/* Technically this isn't safe inside a signal handler. However we
know this only ever happens in a synchronous SEGV handler, so in
practice it seems to be ok. */
mmap_lock();
if (!p) {
mmap_unlock();
return 0;
}
/* if the page was really writable, then we change its
protection back to writable */
prot = 0;
p->flags |= PAGE_WRITE;
/* and since the content will be modified, we must invalidate
the corresponding translated code. */
#ifdef DEBUG_TB_CHECK
#endif
}
mmap_unlock();
return 1;
}
mmap_unlock();
return 0;
}
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)
typedef struct subpage_t {
} subpage_t;
need_subpage) \
do { \
if (addr > start_addr) \
start_addr2 = 0; \
else { \
if (start_addr2 > 0) \
need_subpage = 1; \
} \
\
else { \
need_subpage = 1; \
} \
} while (0)
/* register physical memory.
For RAM, 'size' must be a multiple of the target page size.
If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
io memory page. The address used when calling the IO function is
the offset from the start of the region, plus region_offset. Both
start_addr and region_offset are rounded down to a page boundary
before calculating this offset. This should not be a problem unless
the low bits of start_addr and region_offset differ. */
{
PhysPageDesc *p;
#ifndef VBOX
#endif /* !VBOX */
if (phys_offset == IO_MEM_UNASSIGNED) {
}
if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
int need_subpage = 0;
if (need_subpage) {
if (!(orig_memory & IO_MEM_SUBPAGE)) {
&p->phys_offset, orig_memory,
p->region_offset);
} else {
>> IO_MEM_SHIFT];
}
p->region_offset = 0;
} else {
p->phys_offset = phys_offset;
(phys_offset & IO_MEM_ROMD))
}
} else {
p->phys_offset = phys_offset;
p->region_offset = region_offset;
(phys_offset & IO_MEM_ROMD)) {
} else {
int need_subpage = 0;
if (need_subpage) {
&p->phys_offset, IO_MEM_UNASSIGNED,
addr & TARGET_PAGE_MASK);
p->region_offset = 0;
}
}
}
}
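/* Per the comment before cpu_register_physical_memory above, an I/O callback
   sees the offset from the start of its region plus region_offset, with both
   start_addr and region_offset first rounded down to a page boundary.  The
   arithmetic, isolated, with hypothetical names and a fixed 4K page: */
#if 0   /* illustrative sketch, not compiled */
#include <stdint.h>

#define SKETCH_PAGE_MASK (~(uint64_t)0xfff)

/* offset handed to the callback for an access at 'addr' inside a region
   registered at 'start_addr' with 'region_offset' */
static uint64_t sketch_io_offset(uint64_t addr, uint64_t start_addr,
                                 uint64_t region_offset)
{
    return (addr - (start_addr & SKETCH_PAGE_MASK))
         + (region_offset & SKETCH_PAGE_MASK);
}
#endif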
/* since each CPU stores ram addresses in its TLB cache, we must
reset the modified entries */
#ifndef VBOX
/* XXX: slow ! */
}
#else
    /* We have one thread per CPU, so one of the other EMTs might be executing
       code right now, and flushing the TLB may crash it. */
else
#endif
}
/* XXX: temporary until new memory mapping API */
{
PhysPageDesc *p;
if (!p)
return IO_MEM_UNASSIGNED;
return p->phys_offset;
}
#ifndef VBOX
{
if (kvm_enabled())
}
{
if (kvm_enabled())
}
void qemu_flush_coalesced_mmio_buffer(void)
{
if (kvm_enabled())
}
#if defined(__linux__) && !defined(TARGET_S390X)
#define HUGETLBFS_MAGIC 0x958458f6
{
int ret;
do {
if (ret != 0) {
return 0;
}
}
const char *path)
{
char *filename;
void *area;
int fd;
#ifdef MAP_POPULATE
int flags;
#endif
if (!hpagesize) {
return NULL;
}
return NULL;
}
if (kvm_enabled() && !kvm_has_sync_mmu()) {
return NULL;
}
return NULL;
}
if (fd < 0) {
perror("unable to create backing store for hugepages");
return NULL;
}
/*
* ftruncate is not supported by hugetlbfs in older
* hosts, so don't bother bailing out on errors.
* If anything goes wrong with it under other filesystems,
* mmap will fail.
*/
perror("ftruncate");
#ifdef MAP_POPULATE
/* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
* MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
* to sidestep this quirk.
*/
#else
#endif
if (area == MAP_FAILED) {
perror("file_ram_alloc: can't mmap RAM pages");
return (NULL);
}
return area;
}
#endif
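/* file_ram_alloc() above backs guest RAM with a file on a hugetlbfs mount so
   the host uses huge pages: create a temporary file, ftruncate it to the RAM
   size (failures tolerated on old hosts), then mmap it.  A minimal sketch of
   that sequence; it assumes 'path' is a hugetlbfs mount point and every
   sketch_* name is illustrative only. */
#if 0   /* illustrative sketch, not compiled */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

static void *sketch_hugepage_alloc(size_t size, const char *path)
{
    char filename[4096];
    void *area;
    int fd;

    snprintf(filename, sizeof(filename), "%s/sketch_back_mem.XXXXXX", path);
    fd = mkstemp(filename);
    if (fd < 0) {
        perror("mkstemp");
        return NULL;
    }
    unlink(filename);                      /* keep the mapping, drop the name */

    /* older hugetlbfs may reject ftruncate; a real failure shows up in mmap */
    if (ftruncate(fd, (off_t)size) != 0)
        perror("ftruncate");

    area = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    close(fd);                             /* the mapping survives the close */
    if (area == MAP_FAILED) {
        perror("mmap");
        return NULL;
    }
    return area;
}
#endif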
{
return 0;
}
}
}
}
return offset;
}
static ram_addr_t last_ram_offset(void)
{
ram_addr_t last = 0;
return last;
}
{
if (id) {
}
}
abort();
}
}
last_ram_offset() >> TARGET_PAGE_BITS);
if (kvm_enabled())
}
{
if (id) {
}
}
abort();
}
}
if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
#ifdef MADV_MERGEABLE
#endif
}
#else
exit(1);
#endif
} else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
/* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
#else
#endif
#ifdef MADV_MERGEABLE
#endif
}
last_ram_offset() >> TARGET_PAGE_BITS);
if (kvm_enabled())
}
{
if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
} else {
}
#endif
} else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
#else
#endif
}
return;
}
}
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
With the exception of the softmmu code in this file, this should
only be used for local memory (e.g. video ram) that the device owns,
and knows it isn't going to access beyond the end of the block.
It should not be used for general purpose DMA.
Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
*/
{
}
}
abort();
return NULL;
}
/* Some of the softmmu routines need to translate from a host pointer
(typically a TLB entry) back to a ram offset. */
{
}
}
abort();
return 0;
}
#endif /* !VBOX */
{
#ifdef DEBUG_UNASSIGNED
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
#endif
return 0;
}
{
#ifdef DEBUG_UNASSIGNED
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
#endif
return 0;
}
{
#ifdef DEBUG_UNASSIGNED
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
#endif
return 0;
}
{
#ifdef DEBUG_UNASSIGNED
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
#endif
}
{
#ifdef DEBUG_UNASSIGNED
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
#endif
}
{
#ifdef DEBUG_UNASSIGNED
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
#endif
}
};
};
{
int dirty_flags;
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
#endif
}
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
#else
#endif
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
}
{
int dirty_flags;
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
#endif
}
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
#else
#endif
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
}
{
int dirty_flags;
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
#endif
}
#if defined(VBOX) && !defined(REM_PHYS_ADDR_IN_TLB)
#else
#endif
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
}
NULL, /* never used */
NULL, /* never used */
NULL, /* never used */
};
};
/* Generate a debug exception if a watchpoint has been hit. */
{
int cpu_flags;
if (env->watchpoint_hit) {
/* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
* current instruction. */
return;
}
if (!env->watchpoint_hit) {
if (!tb) {
}
} else {
}
}
} else {
}
}
}
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
so these check for a hit then pass through to the normal out-of-line
phys routines. */
{
}
{
}
{
}
{
}
{
}
{
}
};
};
unsigned int len)
{
#if defined(DEBUG_SUBPAGE)
#endif
}
{
#if defined(DEBUG_SUBPAGE)
#endif
}
{
}
{
}
{
}
{
}
{
}
{
}
static CPUReadMemoryFunc * const subpage_read[] = {
};
static CPUWriteMemoryFunc * const subpage_write[] = {
};
{
return -1;
#if defined(DEBUG_SUBPAGE)
#endif
}
return 0;
}
{
int subpage_memory;
#if defined(DEBUG_SUBPAGE)
#endif
return mmio;
}
static int get_free_io_mem_idx(void)
{
int i;
for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
if (!io_mem_used[i]) {
io_mem_used[i] = 1;
return i;
}
return -1;
}
/* mem_read and mem_write are arrays of functions containing the
function to access byte (index 0), word (index 1) and dword (index
2). Functions can be omitted with a NULL function pointer.
If io_index is non zero, the corresponding io zone is
modified. If it is zero, a new io zone is allocated. The return
value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
CPUReadMemoryFunc * const *mem_read,
CPUWriteMemoryFunc * const *mem_write,
void *opaque)
{
int i;
if (io_index <= 0) {
if (io_index == -1)
return io_index;
} else {
io_index >>= IO_MEM_SHIFT;
if (io_index >= IO_MEM_NB_ENTRIES)
return -1;
}
for (i = 0; i < 3; ++i) {
io_mem_read[io_index][i]
}
for (i = 0; i < 3; ++i) {
io_mem_write[io_index][i]
}
return (io_index << IO_MEM_SHIFT);
}
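/* cpu_register_io_memory_fixed() above stores three read and three write
   callbacks per io_index (byte, word, long); the MMIO slow path then picks
   the handler with the io_index and the access size.  A sketch of that
   two-dimensional dispatch table; sketch_* names are illustrative only. */
#if 0   /* illustrative sketch, not compiled */
#include <stdint.h>

#define SKETCH_IO_NB_ENTRIES 64

typedef uint32_t (*sketch_read_f)(void *opaque, uint64_t addr);
typedef void (*sketch_write_f)(void *opaque, uint64_t addr, uint32_t val);

/* second index: 0 = byte, 1 = word, 2 = long, as in io_mem_read/io_mem_write */
static sketch_read_f  sketch_io_read[SKETCH_IO_NB_ENTRIES][3];
static sketch_write_f sketch_io_write[SKETCH_IO_NB_ENTRIES][3];
static void          *sketch_io_opaque[SKETCH_IO_NB_ENTRIES];

static uint32_t sketch_io_dispatch_read(int io_index, int size_shift, uint64_t addr)
{
    return sketch_io_read[io_index][size_shift](sketch_io_opaque[io_index], addr);
}
#endif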
CPUWriteMemoryFunc * const *mem_write,
void *opaque)
{
}
void cpu_unregister_io_memory(int io_table_address)
{
int i;
for (i=0;i < 3; i++) {
}
io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
int i;
for (i=0; i<5; i++)
io_mem_used[i] = 1;
}
#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
{
int l, flags;
void * p;
while (len > 0) {
if (l > len)
l = len;
if (!(flags & PAGE_VALID))
return -1;
if (is_write) {
if (!(flags & PAGE_WRITE))
return -1;
/* XXX: this code should not depend on lock_user */
return -1;
unlock_user(p, addr, l);
} else {
return -1;
/* XXX: this code should not depend on lock_user */
return -1;
unlock_user(p, addr, 0);
}
len -= l;
buf += l;
addr += l;
}
return 0;
}
#else
{
int l, io_index;
PhysPageDesc *p;
while (len > 0) {
if (l > len)
l = len;
if (!p) {
} else {
pd = p->phys_offset;
}
if (is_write) {
if (p)
/* XXX: could force cpu_single_env to NULL to avoid
potential bugs */
/* 32 bit write access */
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
#else
#endif
l = 4;
/* 16 bit write access */
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
#else
#endif
l = 2;
} else {
/* 8 bit write access */
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
#else
#endif
l = 1;
}
} else {
/* RAM case */
#ifdef VBOX
#else
#endif
if (!cpu_physical_memory_is_dirty(addr1)) {
/* invalidate code */
/* set dirty bit */
}
}
} else {
!(pd & IO_MEM_ROMD)) {
/* I/O case */
if (p)
/* 32 bit read access */
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
#else
#endif
l = 4;
/* 16 bit read access */
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
#else
#endif
l = 2;
} else {
/* 8 bit read access */
#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
#else
#endif
l = 1;
}
} else {
/* RAM case */
#ifdef VBOX
#else
(addr & ~TARGET_PAGE_MASK);
#endif
}
}
len -= l;
buf += l;
addr += l;
}
}
#ifndef VBOX
/* used for ROM loading : can write in RAM and ROM */
{
int l;
PhysPageDesc *p;
while (len > 0) {
if (l > len)
l = len;
if (!p) {
} else {
pd = p->phys_offset;
}
!(pd & IO_MEM_ROMD)) {
/* do nothing */
} else {
}
len -= l;
buf += l;
addr += l;
}
}
typedef struct {
void *buffer;
} BounceBuffer;
static BounceBuffer bounce;
typedef struct MapClient {
void *opaque;
} MapClient;
{
return client;
}
void cpu_unregister_map_client(void *_client)
{
}
static void cpu_notify_map_clients(void)
{
while (!QLIST_EMPTY(&map_client_list)) {
}
}
/* Map a physical memory region into a host virtual address.
* May map a subset of the requested range, given by and returned in *plen.
* May return NULL if resources needed to perform the mapping are exhausted.
* Use only for reads OR writes - not for read-modify-write operations.
* Use cpu_register_map_client() to know when retrying the map operation is
* likely to succeed.
*/
int is_write)
{
target_phys_addr_t done = 0;
int l;
PhysPageDesc *p;
while (len > 0) {
if (l > len)
l = len;
if (!p) {
} else {
pd = p->phys_offset;
}
break;
}
if (!is_write) {
}
} else {
}
if (!done) {
break;
}
len -= l;
addr += l;
done += l;
}
return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
* Will also mark the memory as dirty if is_write == 1. access_len gives
* the amount of memory that was actually read or written by the caller.
*/
{
if (is_write) {
while (access_len) {
unsigned l;
l = TARGET_PAGE_SIZE;
if (l > access_len)
l = access_len;
if (!cpu_physical_memory_is_dirty(addr1)) {
/* invalidate code */
/* set dirty bit */
}
addr1 += l;
access_len -= l;
}
}
return;
}
if (is_write) {
}
}
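/* cpu_physical_memory_map()/cpu_physical_memory_unmap() above are meant to be
   used bracket-style around DMA-like accesses: map may return less than the
   requested length (or NULL when resources such as the bounce buffer are
   exhausted, in which case cpu_register_map_client() can be used to retry),
   and unmap reports how much was really accessed so dirty tracking stays
   correct.  A usage sketch; the device-side 'dev_consume' helper and the
   sketch_* name are hypothetical. */
#if 0   /* illustrative usage sketch, not compiled */
static void sketch_dma_read(target_phys_addr_t addr, target_phys_addr_t len,
                            void (*dev_consume)(const void *buf, size_t n))
{
    while (len > 0) {
        target_phys_addr_t l = len;
        void *buf = cpu_physical_memory_map(addr, &l, 0 /* is_write */);

        if (!buf)
            break;                      /* resources exhausted; retry later */
        dev_consume(buf, l);
        cpu_physical_memory_unmap(buf, l, 0 /* is_write */, l /* access_len */);
        addr += l;
        len -= l;
    }
}
#endif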
#endif /* !VBOX */
/* warning: addr must be aligned */
{
int io_index;
PhysPageDesc *p;
if (!p) {
} else {
pd = p->phys_offset;
}
!(pd & IO_MEM_ROMD)) {
/* I/O case */
if (p)
} else {
/* RAM case */
#ifndef VBOX
(addr & ~TARGET_PAGE_MASK);
#else
#endif
}
return val;
}
/* warning: addr must be aligned */
{
int io_index;
PhysPageDesc *p;
if (!p) {
} else {
pd = p->phys_offset;
}
!(pd & IO_MEM_ROMD)) {
/* I/O case */
if (p)
#ifdef TARGET_WORDS_BIGENDIAN
#else
#endif
} else {
/* RAM case */
#ifndef VBOX
(addr & ~TARGET_PAGE_MASK);
#else
#endif
}
return val;
}
/* XXX: optimize */
{
return val;
}
/* warning: addr must be aligned */
{
int io_index;
#ifndef VBOX
#endif
PhysPageDesc *p;
if (!p) {
} else {
pd = p->phys_offset;
}
!(pd & IO_MEM_ROMD)) {
/* I/O case */
if (p)
} else {
/* RAM case */
#ifndef VBOX
(addr & ~TARGET_PAGE_MASK);
#else
#endif
}
return val;
}
/* warning: addr must be aligned. The ram page is not masked as dirty
and the code inside is not invalidated. It is useful if the dirty
bits are used to track modified PTEs */
{
int io_index;
PhysPageDesc *p;
if (!p) {
} else {
pd = p->phys_offset;
}
if (p)
} else {
#ifndef VBOX
#else
#endif
#ifndef VBOX
if (unlikely(in_migration)) {
if (!cpu_physical_memory_is_dirty(addr1)) {
/* invalidate code */
/* set dirty bit */
}
}
#endif /* !VBOX */
}
}
{
int io_index;
PhysPageDesc *p;
if (!p) {
} else {
pd = p->phys_offset;
}
if (p)
#ifdef TARGET_WORDS_BIGENDIAN
#else
#endif
} else {
#ifndef VBOX
(addr & ~TARGET_PAGE_MASK);
#else
#endif
}
}
/* warning: addr must be aligned */
{
int io_index;
PhysPageDesc *p;
if (!p) {
} else {
pd = p->phys_offset;
}
if (p)
} else {
/* RAM case */
#ifndef VBOX
#else
#endif
if (!cpu_physical_memory_is_dirty(addr1)) {
/* invalidate code */
/* set dirty bit */
(0xff & ~CODE_DIRTY_FLAG));
}
}
}
/* XXX: optimize */
{
}
/* warning: addr must be aligned */
{
int io_index;
PhysPageDesc *p;
if (!p) {
} else {
pd = p->phys_offset;
}
if (p)
} else {
/* RAM case */
#ifndef VBOX
#else
#endif
if (!cpu_physical_memory_is_dirty(addr1)) {
/* invalidate code */
/* set dirty bit */
(0xff & ~CODE_DIRTY_FLAG));
}
}
}
/* XXX: optimize */
{
}
#ifndef VBOX
/* virtual memory access for debug (includes writing to ROM) */
{
int l;
while (len > 0) {
/* if no physical page mapped, return an error */
if (phys_addr == -1)
return -1;
if (l > len)
l = len;
if (is_write)
else
len -= l;
buf += l;
addr += l;
}
return 0;
}
#endif /* !VBOX */
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
{
if (!tb) {
retaddr);
}
/* Calculate how many instructions had been executed before the fault
occurred. */
/* Generate a new TB ending on the I/O insn. */
n++;
/* On MIPS and SH, delay slot instructions can only be restarted if
they were already the first instruction in the TB. If this is not
the first instruction in a TB then re-execute the preceding
branch. */
#if defined(TARGET_MIPS)
}
#elif defined(TARGET_SH4)
&& n > 1) {
}
#endif
/* This should never happen. */
if (n > CF_COUNT_MASK)
cflags = n | CF_LAST_IO;
/* FIXME: In theory this could raise an exception. In practice
we have already translated the block once so it's probably ok. */
/* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
the first in the TB) then we end up generating a whole new TB and
repeating the fault, which is horribly inefficient.
Better would be to execute just this insn uncached, or generate a
second new TB. */
}
#if !defined(CONFIG_USER_ONLY)
#ifndef VBOX
void dump_exec_info(FILE *f,
{
int i, target_code_size, max_target_code_size;
target_code_size = 0;
max_target_code_size = 0;
cross_page = 0;
direct_jmp_count = 0;
direct_jmp2_count = 0;
for(i = 0; i < nb_tbs; i++) {
cross_page++;
}
}
}
/* XXX: avoid using doubles ? */
cpu_fprintf(f, "Translation buffer state:\n");
cpu_fprintf(f, "gen code size %ld/%ld\n",
cpu_fprintf(f, "TB count %d/%d\n",
cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
cpu_fprintf(f, "\nStatistics:\n");
tcg_dump_info(f, cpu_fprintf);
}
#endif /* !VBOX */
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS
#define SHIFT 0
#include "softmmu_template.h"
#define SHIFT 1
#include "softmmu_template.h"
#define SHIFT 2
#include "softmmu_template.h"
#define SHIFT 3
#include "softmmu_template.h"
#endif