/* helper2.c revision 9b898ee3642a830a4d4af4d0a485a49d157b5551 */
/*
* i386 helpers (without register variable usage)
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#ifndef VBOX
#include <signal.h>
#include <assert.h>
#else
#endif
#include "cpu.h"
#include "exec-all.h"
//#define DEBUG_MMU
#ifdef USE_CODE_COPY
/* NOTE(review): this file appears to be a truncated extraction -- the
function signature that owned this empty brace pair is missing, and the
doubled #endif below is unbalanced relative to the visible #ifdef. */
{
}
/* map the host's LDT descriptor struct name onto the kernel's user_desc */
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */
#ifdef VBOX
{
#else /* !VBOX */
/*
 * Create/initialize a CPUX86State and return it.
 * NOTE(review): this file looks like a truncated extraction -- many
 * statements are missing from the body below (e.g. the allocation that
 * sets 'env'), so these comments describe only what is visible.
 */
CPUX86State *cpu_x86_init(void)
{
#endif /* !VBOX */
/* one-shot guard so the static tables are only initialized once */
static int inited;
#ifndef VBOX
if (!env)
return NULL;
#endif /* !VBOX */
/* init various static tables */
if (!inited) {
inited = 1;
}
#ifdef USE_CODE_COPY
/* testing code for code copy case */
{
struct modify_ldt_ldt_s ldt;
ldt.read_exec_only = 0;
ldt.seg_not_present = 0;
/* load selector 0x0f (index 1, TI=LDT, RPL=3) into %fs for the test */
asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
}
#endif
#ifndef VBOX /* cpuid_features is initialized by caller */
{
#ifdef TARGET_X86_64
family = 6;
model = 2;
stepping = 3;
#else
#if 0
/* pentium 75-200 */
family = 5;
model = 2;
stepping = 11;
#else
/* pentium pro */
family = 6;
model = 3;
stepping = 3;
#endif
#endif
env->cpuid_xlevel = 0;
{
/* NOTE(review): 'len' is never assigned in the visible fragment --
presumably set by missing code (length of the CPUID model-id string);
verify against the original file. */
int c, len, i;
for(i = 0; i < 48; i++) {
if (i >= len)
c = '\0';
else
c = model_id[i];
}
}
#ifdef TARGET_X86_64
/* currently not enabled for std i386 because not fully tested */
/* these features are needed for Win64 and aren't fully implemented */
/* this feature is needed for Solaris and isn't fully implemented */
#endif
}
#endif /* VBOX */
#ifdef USE_KQEMU
#endif
return env;
}
/* NOTE: must be called outside the CPU execute loop */
/* NOTE(review): the function signature for this body is missing from this
truncated extraction (presumably the CPU reset routine); the for-loop's
statement is also missing, leaving the brace unbalanced. */
{
int i;
/* init to reset state */
#ifdef CONFIG_SOFTMMU
#endif
/* FPU init */
for(i = 0;i < 8; i++)
}
#ifndef VBOX
/* NOTE(review): empty braced body; the owning function's signature is
missing from this truncated extraction. */
{
}
#endif
/***********************************************************/
/* x86 debug */
/*
 * Printable names for the lazy condition-code evaluation states, used by
 * the register-dump code below.  NOTE(review): the order appears to track
 * the CC_OP_* enumeration declared elsewhere (presumably cpu.h); entries
 * are indexed by enum value, so do not reorder or remove entries.
 */
static const char *cc_op_str[] = {
"DYNAMIC",
"EFLAGS",
"MULB",
"MULW",
"MULL",
"MULQ",
"ADDB",
"ADDW",
"ADDL",
"ADDQ",
"ADCB",
"ADCW",
"ADCL",
"ADCQ",
"SUBB",
"SUBW",
"SUBL",
"SUBQ",
"SBBB",
"SBBW",
"SBBL",
"SBBQ",
"LOGICB",
"LOGICW",
"LOGICL",
"LOGICQ",
"INCB",
"INCW",
"INCL",
"INCQ",
"DECB",
"DECW",
"DECL",
"DECQ",
"SHLB",
"SHLW",
"SHLL",
"SHLQ",
"SARB",
"SARW",
"SARL",
"SARQ",
};
/* NOTE(review): only the tail of the signature survives in this truncated
extraction ('int flags)'); judging by X86_DUMP_CCOP/X86_DUMP_FPU and the
cpu_fprintf calls, this is the CPU register/state dump routine.  Most
cpu_fprintf argument lists below are incomplete -- the missing lines were
lost in extraction, not deliberately removed. */
int flags)
{
char cc_op_name[32];
#ifdef TARGET_X86_64
cpu_fprintf(f,
} else
#endif
{
cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
"ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
"EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
}
#ifdef TARGET_X86_64
for(i = 0; i < 6; i++) {
seg_name[i],
}
} else
#endif
{
/* dump the six segment registers by name */
for(i = 0; i < 6; i++) {
cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
seg_name[i],
}
cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
cpu_fprintf(f, "GDT= %08x %08x\n",
cpu_fprintf(f, "IDT= %08x %08x\n",
cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
}
/* optional condition-code internals, gated by caller-supplied flag */
if (flags & X86_DUMP_CCOP) {
else
#ifdef TARGET_X86_64
} else
#endif
{
cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
}
}
/* optional FPU/SSE state, gated by caller-supplied flag */
if (flags & X86_DUMP_FPU) {
int fptag;
fptag = 0;
for(i = 0; i < 8; i++) {
}
cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
union {
long double d;
struct {
} l;
} tmp;
#else
#endif
/* two FP registers per output line */
if ((i & 1) == 1)
cpu_fprintf(f, "\n");
else
cpu_fprintf(f, " ");
}
nb = 16;
else
nb = 8;
for(i=0;i<nb;i++) {
cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
i,
/* two XMM registers per output line */
if ((i & 1) == 1)
cpu_fprintf(f, "\n");
else
cpu_fprintf(f, " ");
}
}
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
/* NOTE(review): the function signature is missing from this truncated
extraction; the comments below suggest this is the A20-line update routine.
The trailing extra '}' is an artifact of the missing inner block. */
{
#if defined(DEBUG_MMU)
#endif
/* if the cpu is currently executing code, we must unlink it and
all the potentially executing TB */
/* when a20 is changed, all the MMU mappings are invalid, so
we must flush everything */
}
}
/* NOTE(review): function signature missing in this truncated extraction;
the PE-flag and long-mode handling below suggests the CR0 update routine.
Braces are unbalanced because interior lines were lost. */
{
int pe_state;
#if defined(DEBUG_MMU)
#endif
}
#ifdef TARGET_X86_64
/* enter in long mode */
/* XXX: generate an exception */
return;
/* exit long mode */
}
#endif
/* update PE flag in hidden flags */
/* ensure that ADDSEG is always set in real mode */
/* update FPU flags */
#ifdef VBOX
#endif
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
the PDPT */
/* NOTE(review): function signature missing in this truncated extraction;
presumably the CR3 update routine.  The doubled '}' reflects a lost inner
block. */
{
#if defined(DEBUG_MMU)
#endif
}
}
/* NOTE(review): function signature missing in this truncated extraction;
the OSFXSR/SSE handling below suggests the CR4 update routine.  The
unconditional clear of CR4_OSFXSR_MASK immediately before testing it looks
like a lost guard condition (extraction artifact) -- verify against the
original file. */
{
#if defined(DEBUG_MMU)
#endif
}
/* SSE handling */
new_cr4 &= ~CR4_OSFXSR_MASK;
if (new_cr4 & CR4_OSFXSR_MASK)
else
#ifdef VBOX
#endif
}
/* XXX: also flush 4MB pages */
/* NOTE(review): function signature missing in this truncated extraction;
the printf text suggests a TLB flush helper.  The stray #endif has no
visible matching #if -- an extraction artifact. */
{
/* page directory entry */
/* if PSE bit is set, then we use a 4MB page */
printf("cpu_x86_flush_tlb: 4 MB page!!!!!\n");
}
#endif
}
#if defined(CONFIG_USER_ONLY)
/* NOTE(review): two stub bodies for the user-mode-only build; both
signatures are missing from this truncated extraction.  The first
(returning 1) presumably handles MMU faults, the second (returning addr)
presumably maps virtual to physical addresses 1:1. */
{
/* user mode only emulation */
is_write &= 1;
return 1;
}
{
return addr;
}
#else
/* mask to extract the 4 KB-aligned physical frame from a PDE/PTE */
#define PHYS_ADDR_MASK 0xfffff000
/* return value:
-1 = cannot handle fault
0 = nothing more to do
1 = generate PF fault
2 = soft MMU activation required for this block
*/
/* NOTE(review): the function signature is missing from this truncated
extraction; the PML4E/PDPE/PDE/PTE walk and do_fault/do_fault_protect
labels identify this as the softmmu page-table walk / MMU fault handler.
Large numbers of interior lines (the actual loads of pml4e/pdpe/pde/pte,
the protection tests, and the fault labels themselves) were lost, so
braces and gotos below are unbalanced. */
{
unsigned long paddr, page_offset;
#if defined(DEBUG_MMU)
#endif
/* paging disabled path: identity mapping at 4 KB granularity */
page_size = 4096;
goto do_mapping;
}
/* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
/* test virtual address sign extension */
env->error_code = 0;
return 1;
}
/* level 4: PML4 entry */
if (!(pml4e & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
goto do_fault;
}
if (!(pml4e & PG_ACCESSED_MASK)) {
}
/* level 3: page-directory-pointer entry */
if (!(pdpe & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
goto do_fault;
}
if (!(pdpe & PG_ACCESSED_MASK)) {
pdpe |= PG_ACCESSED_MASK;
}
} else
#endif
{
/* legacy PAE path */
/* XXX: load them when cr3 is loaded ? */
if (!(pdpe & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
}
/* level 2: page-directory entry */
if (!(pde & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
goto do_fault;
}
if (pde & PG_PSE_MASK) {
/* 2 MB page */
ptep ^= PG_NX_MASK;
goto do_fault_protect;
if (is_user) {
if (!(ptep & PG_USER_MASK))
goto do_fault_protect;
goto do_fault_protect;
} else {
goto do_fault_protect;
}
/* mark the PDE accessed (and dirty on write) */
pde |= PG_ACCESSED_MASK;
if (is_dirty)
pde |= PG_DIRTY_MASK;
}
/* align to page_size */
} else {
/* 4 KB page */
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
}
/* level 1: page-table entry */
if (!(pte & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
goto do_fault;
}
/* combine pde and pte nx, user and rw protections */
ptep ^= PG_NX_MASK;
goto do_fault_protect;
if (is_user) {
if (!(ptep & PG_USER_MASK))
goto do_fault_protect;
goto do_fault_protect;
} else {
goto do_fault_protect;
}
pte |= PG_ACCESSED_MASK;
if (is_dirty)
pte |= PG_DIRTY_MASK;
}
page_size = 4096;
}
} else {
/* non-PAE 32-bit walk */
/* page directory entry */
if (!(pde & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
/* if PSE bit is set, then we use a 4MB page */
if (is_user) {
if (!(pde & PG_USER_MASK))
goto do_fault_protect;
goto do_fault_protect;
} else {
goto do_fault_protect;
}
pde |= PG_ACCESSED_MASK;
if (is_dirty)
pde |= PG_DIRTY_MASK;
}
} else {
if (!(pde & PG_ACCESSED_MASK)) {
pde |= PG_ACCESSED_MASK;
}
/* page directory entry */
if (!(pte & PG_PRESENT_MASK)) {
error_code = 0;
goto do_fault;
}
/* combine pde and pte user and rw protections */
if (is_user) {
if (!(ptep & PG_USER_MASK))
goto do_fault_protect;
goto do_fault_protect;
} else {
goto do_fault_protect;
}
pte |= PG_ACCESSED_MASK;
if (is_dirty)
pte |= PG_DIRTY_MASK;
}
page_size = 4096;
}
}
/* the page can be put in the TLB */
if (!(ptep & PG_NX_MASK))
if (pte & PG_DIRTY_MASK) {
/* only set write access if already dirty... otherwise wait
for dirty access */
if (is_user) {
if (ptep & PG_RW_MASK)
prot |= PAGE_WRITE;
} else {
(ptep & PG_RW_MASK))
prot |= PAGE_WRITE;
}
}
/* Even if 4MB pages, we map only one 4KB page in the cache to
avoid filling it too fast */
return ret;
/* fault exit paths (labels lost in extraction) */
if (is_user)
if (is_write1 == 2 &&
return 1;
}
/* NOTE(review): function signature missing in this truncated extraction;
this is a non-faulting page-walk (returns -1 on any non-present entry and
'paddr' on success), presumably the debug physical-address lookup.  The
loads of pml4e/pdpe/pde/pte and the address computations were lost, so
braces below are unbalanced. */
{
/* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
/* test virtual address sign extension */
return -1;
if (!(pml4e & PG_PRESENT_MASK))
return -1;
if (!(pdpe & PG_PRESENT_MASK))
return -1;
} else
#endif
{
if (!(pdpe & PG_PRESENT_MASK))
return -1;
}
if (!(pde & PG_PRESENT_MASK)) {
return -1;
}
if (pde & PG_PSE_MASK) {
/* 2 MB page */
} else {
/* 4 KB page */
page_size = 4096;
}
} else {
page_size = 4096;
} else {
/* page directory entry */
if (!(pde & PG_PRESENT_MASK))
return -1;
} else {
/* page directory entry */
if (!(pte & PG_PRESENT_MASK))
return -1;
page_size = 4096;
}
}
}
return paddr;
}
#endif /* !CONFIG_USER_ONLY */
#if defined(USE_CODE_COPY)
/* NOTE(review): the struct members and both function signatures were lost
in this truncated extraction; the fptag loops identify these as the FPU
state save (first body) and restore (second body) helpers for the
code-copy build. */
struct fpstate {
};
{
int fptag, i, j;
/* build the packed 2-bit-per-register tag word, ST7 down to ST0 */
fptag = 0;
for (i=7; i>=0; i--) {
fptag <<= 2;
fptag |= 3;
} else {
/* the FPU automatically computes it */
}
}
for(i = 0;i < 8; i++) {
j = (j + 1) & 7;
}
}
{
int fptag, i, j;
/* unpack the 2-bit-per-register tag word */
for(i = 0;i < 8; i++) {
fptag >>= 2;
}
for(i = 0;i < 8; i++) {
j = (j + 1) & 7;
}
/* we must restore the default rounding state */
/* XXX: we do not restore the exception state */
env->native_fp_regs = 0;
}
#endif