; AMD64ToPAE.asm revision ddd7306b8aaa79534e08df0516997ed62c549f72
;
; VMM - World Switchers, AMD64 to PAE.
;
; Copyright (C) 2006 InnoTek Systemberatung GmbH
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License as published by the Free Software Foundation,
; in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
; distribution. VirtualBox OSE is distributed in the hope that it will
; be useful, but WITHOUT ANY WARRANTY of any kind.
;
; If you received this file as part of a commercial VirtualBox
; distribution, then only the terms of your commercial VirtualBox
; license agreement apply instead of the previous paragraph.
; NOTE(review): the lines below had a VCS sync hash ("1f1986...vboxsync")
; fused onto the front of each line, which made the two live %define
; directives unparseable by NASM.  The residue has been stripped; the
; directive and banner text is otherwise unchanged.

;%define DEBUG_STUFF 1
%define STRICT_IF 1

;*******************************************************************************
;* Defined Constants And Macros *
;*******************************************************************************
;; Prefix all names.
%define NAME_OVERLOAD(name) vmmR3SwitcherAMD64ToPAE_ %+ name

;*******************************************************************************
;* Header Files *
;*******************************************************************************

; Start the fixup records
; We collect the fixups in the .data section as we go along
; It is therefore VITAL that no-one is using the .data section
; for anything else between 'Start' and 'End'.
1f1986470af9f0bb750dd859b142dc2e952deb20vboxsyncGLOBALNAME Fixups
lea rdx, [r9 + VM.cpum]
lea r8, [r9 + VM.StatSwitcherToGC]
lea r8, [r9 + VM.StatSwitcherToGC]
; mov [rdx + CPUM.Host.rax], rax - scratch
mov [rdx + CPUM.Host.rbx], rbx
; mov [rdx + CPUM.Host.rcx], rcx - scratch
; mov [rdx + CPUM.Host.rdx], rdx - scratch
mov [rdx + CPUM.Host.rdi], rdi
mov [rdx + CPUM.Host.rsi], rsi
mov [rdx + CPUM.Host.rsp], rsp
mov [rdx + CPUM.Host.rbp], rbp
; mov [rdx + CPUM.Host.r8 ], r8 - scratch
; mov [rdx + CPUM.Host.r9 ], r9 - scratch
mov [rdx + CPUM.Host.r10], r10
mov [rdx + CPUM.Host.r11], r11
mov [rdx + CPUM.Host.r12], r12
mov [rdx + CPUM.Host.r13], r13
mov [rdx + CPUM.Host.r14], r14
mov [rdx + CPUM.Host.r15], r15
mov [rdx + CPUM.Host.ds], ds
mov [rdx + CPUM.Host.es], es
mov [rdx + CPUM.Host.fs], fs
mov [rdx + CPUM.Host.gs], gs
mov [rdx + CPUM.Host.ss], ss
mov [rbx + CPUM.Host.FSbase], eax
mov [rbx + CPUM.Host.FSbase + 4], edx
mov [rbx + CPUM.Host.GSbase], eax
mov [rbx + CPUM.Host.GSbase + 4], edx
mov [rbx + CPUM.Host.efer], eax
mov [rbx + CPUM.Host.efer + 4], edx
sldt [rdx + CPUM.Host.ldtr]
sidt [rdx + CPUM.Host.idtr]
sgdt [rdx + CPUM.Host.gdtr]
str [rdx + CPUM.Host.tr] ; yasm BUG, generates sldt. YASMCHECK!
pop qword [rdx + CPUM.Host.rflags]
FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
mov [rbx + CPUM.Host.SysEnter.cs], rax
mov [rbx + CPUM.Host.SysEnter.cs + 4], rdx
mov esi, [rdx + CPUM.fUseFlags] ; esi == use flags.
and esi, ~CPUM_USED_FPU ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
mov [rdx + CPUM.fUseFlags], esi
mov [rdx + CPUM.Host.cr0], rax
;mov [rdx + CPUM.Host.cr2], rax
mov [rdx + CPUM.Host.cr3], rax
mov [rdx + CPUM.Host.cr4], rax
; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
; NOTE(review): fragment -- the loads/stores between these lines are missing
; and the visible order is not the original execution order (eax is OR'ed
; below before the line that loads it).  Bytes preserved; comments only.
mov ecx, [rdx + CPUM.Guest.cr4]         ; guest CR4 wishes
; in CPUM.Hyper.cr4 (which isn't currently being used). That should
and ecx, [rdx + CPUM.CR4.AndMask]       ; mask with what the host permits
or eax, [rdx + CPUM.CR4.OrMask]         ; force the bits the hypervisor requires
mov eax, [rdx + CPUM.Guest.cr0]
lgdt [rdx + CPUM.Hyper.gdtr]            ; switch to the hypervisor GDT
; Save host debug registers (rax/rbx/rcx presumably hold drN values loaded
; by lines not shown here).
mov [rdx + CPUM.Host.dr7], rax
mov [rdx + CPUM.Host.dr6], rax
mov [rdx + CPUM.Host.dr0], rax
mov [rdx + CPUM.Host.dr1], rbx
mov [rdx + CPUM.Host.dr2], rcx
mov [rdx + CPUM.Host.dr3], rax
; --- 32-bit (PAE) side: load the hypervisor execution environment. ---
lidt [edx + CPUM.Hyper.idtr]
lss esp, [edx + CPUM.Hyper.esp]         ; load ss:esp in one instruction
ltr word [edx + CPUM.Hyper.tr]
lldt [edx + CPUM.Hyper.ldtr]
; Load the hypervisor general purpose registers.
mov esi, [edx + CPUM.fUseFlags]
mov ebx, [edx + CPUM.Hyper.ebx]
mov ebp, [edx + CPUM.Hyper.ebp]
mov esi, [edx + CPUM.Hyper.esi]
mov edi, [edx + CPUM.Hyper.edi]
push dword [edx + CPUM.Hyper.eflags]    ; consumed by a popf/iret not shown
mov eax, [edx + CPUM.Hyper.eip]
mov eax, [edx + CPUM.Hyper.eip]         ; duplicate -- presumably from a second code path in the full file
FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC ; relocator patches the GC address of this VM field
; Hypervisor debug register values (written to drN by lines not shown).
mov ebx, [edx + CPUM.Hyper.dr0]
mov ecx, [edx + CPUM.Hyper.dr1]
mov eax, [edx + CPUM.Hyper.dr2]
mov ebx, [edx + CPUM.Hyper.dr3]
;mov eax, [edx + CPUM.Hyper.dr6]
mov eax, [edx + CPUM.Hyper.dr7]
; --- Guest-to-host (guest context) fragment: the guest register frame was
; presumably pushed on the stack by code not shown; it is popped into
; CPUM.Guest here.  Pop order must match the push order exactly.
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC        ; profiling: time spent in GC
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu    ; profiling
FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC     ; profiling
pop dword [edx + CPUM.Guest.edi]
pop dword [edx + CPUM.Guest.esi]
pop dword [edx + CPUM.Guest.ebp]
pop dword [edx + CPUM.Guest.eax]
pop dword [edx + CPUM.Guest.ebx]
pop dword [edx + CPUM.Guest.edx]
pop dword [edx + CPUM.Guest.ecx]
pop dword [edx + CPUM.Guest.esp]
pop dword [edx + CPUM.Guest.ss]
pop dword [edx + CPUM.Guest.gs]
pop dword [edx + CPUM.Guest.fs]
pop dword [edx + CPUM.Guest.es]
pop dword [edx + CPUM.Guest.ds]
pop dword [edx + CPUM.Guest.cs]
pop dword [edx + CPUM.Guest.eflags]
pop dword [edx + CPUM.Guest.eip]
; Profiling fixups repeated -- presumably belonging to a later code path.
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
; --- Guest-to-host (hypervisor context) fragment: copy the register frame
; pointed to by ecx (a CPUMCTXCORE) field-by-field into the hypervisor
; context in CPUM (edx), using eax as the shuttle register.
mov eax, [ecx + CPUMCTXCORE.edi]
mov [edx + CPUM.Hyper.edi], eax
mov eax, [ecx + CPUMCTXCORE.esi]
mov [edx + CPUM.Hyper.esi], eax
mov eax, [ecx + CPUMCTXCORE.ebp]
mov [edx + CPUM.Hyper.ebp], eax
mov eax, [ecx + CPUMCTXCORE.eax]
mov [edx + CPUM.Hyper.eax], eax
mov eax, [ecx + CPUMCTXCORE.ebx]
mov [edx + CPUM.Hyper.ebx], eax
mov eax, [ecx + CPUMCTXCORE.edx]
mov [edx + CPUM.Hyper.edx], eax
mov eax, [ecx + CPUMCTXCORE.ecx]
mov [edx + CPUM.Hyper.ecx], eax
mov eax, [ecx + CPUMCTXCORE.esp]
mov [edx + CPUM.Hyper.esp], eax
; Segment selectors.
mov eax, [ecx + CPUMCTXCORE.ss]
mov [edx + CPUM.Hyper.ss], eax
mov eax, [ecx + CPUMCTXCORE.gs]
mov [edx + CPUM.Hyper.gs], eax
mov eax, [ecx + CPUMCTXCORE.fs]
mov [edx + CPUM.Hyper.fs], eax
mov eax, [ecx + CPUMCTXCORE.es]
mov [edx + CPUM.Hyper.es], eax
mov eax, [ecx + CPUMCTXCORE.ds]
mov [edx + CPUM.Hyper.ds], eax
mov eax, [ecx + CPUMCTXCORE.cs]
mov [edx + CPUM.Hyper.cs], eax
; Flags and instruction pointer.
mov eax, [ecx + CPUMCTXCORE.eflags]
mov [edx + CPUM.Hyper.eflags], eax
mov eax, [ecx + CPUMCTXCORE.eip]
mov [edx + CPUM.Hyper.eip], eax
; Profiling fixups (relocator patches the GC address of these VM fields).
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
; --- Guest-to-host fragment: save the hypervisor context, then (after the
; identity-mapped switch back to 64-bit mode, code not shown) restore the
; host context saved earlier into CPUM.Host. ---
pop dword [edx + CPUM.Hyper.eip] ; call return from stack
mov dword [edx + CPUM.Hyper.ebx], ebx
mov dword [edx + CPUM.Hyper.esi], esi
mov dword [edx + CPUM.Hyper.edi], edi
mov dword [edx + CPUM.Hyper.ebp], ebp
mov dword [edx + CPUM.Hyper.esp], esp
; str [edx + CPUM.Hyper.tr] - double fault only, and it won't be right then either.
sldt [edx + CPUM.Hyper.ldtr]
; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
; --- 64-bit host side from here on (rdx addressing). ---
mov rsi, [rdx + CPUM.Host.cr3]          ; host CR3 (presumably written to cr3 by code not shown)
lgdt [rdx + CPUM.Host.gdtr]
lidt [rdx + CPUM.Host.idtr]
; Reload TR.  NOTE(review): the TSS busy bit is presumably cleared in the
; GDT between these lines (not shown); otherwise ltr would #GP on a busy
; TSS descriptor -- verify against upstream.
movzx eax, word [rdx + CPUM.Host.tr] ; eax <- TR
ltr word [rdx + CPUM.Host.tr]
movzx eax, word [rdx + CPUM.Host.tr] ; eax <- TR
ltr word [rdx + CPUM.Host.tr]
lldt [rdx + CPUM.Host.ldtr]
; Segment selectors (presumably moved into the segment registers via eax
; by code not shown).
mov eax, [rdx + CPUM.Host.ds]
mov eax, [rdx + CPUM.Host.es]
mov eax, [rdx + CPUM.Host.fs]
mov eax, [rdx + CPUM.Host.gs]
mov eax, [rdx + CPUM.Host.ss]
mov rsp, [rdx + CPUM.Host.rsp]          ; back on the host stack
FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
mov eax, [rdx + CPUM.Host.SysEnter.cs]
mov ebx, [rdx + CPUM.Host.SysEnter.cs + 4]
mov rbx, rdx ; save/load edx
; FPU/SSE state: save the guest's, restore the host's.
mov esi, [rdx + CPUM.fUseFlags] ; esi == use flags.
fxsave [rdx + CPUM.Guest.fpu]
fxrstor [rdx + CPUM.Host.fpu]
; Control registers (presumably written back to crN by code not shown).
mov rcx, [rdx + CPUM.Host.cr4]
mov rcx, [rdx + CPUM.Host.cr0]
;mov rcx, [rdx + CPUM.Host.cr2] ; assumes this is waste of time.
; MSR restore: eax:edx pairs presumably fed to wrmsr (not shown).
mov eax, [rbx + CPUM.Host.FSbase]
mov edx, [rbx + CPUM.Host.FSbase + 4]
mov eax, [rbx + CPUM.Host.GSbase]
mov edx, [rbx + CPUM.Host.GSbase + 4]
mov eax, [rbx + CPUM.Host.efer]
mov edx, [rbx + CPUM.Host.efer + 4]
; General purpose registers.
; mov rax, [rdx + CPUM.Host.rax] - scratch + return code
mov rbx, [rdx + CPUM.Host.rbx]
; mov rcx, [rdx + CPUM.Host.rcx] - scratch
; mov rdx, [rdx + CPUM.Host.rdx] - scratch
mov rdi, [rdx + CPUM.Host.rdi]
mov rsi, [rdx + CPUM.Host.rsi]
mov rsp, [rdx + CPUM.Host.rsp]
mov rbp, [rdx + CPUM.Host.rbp]
; mov r8, [rdx + CPUM.Host.r8 ] - scratch
; mov r9, [rdx + CPUM.Host.r9 ] - scratch
mov r10, [rdx + CPUM.Host.r10]
mov r11, [rdx + CPUM.Host.r11]
mov r12, [rdx + CPUM.Host.r12]
mov r13, [rdx + CPUM.Host.r13]
mov r14, [rdx + CPUM.Host.r14]
mov r15, [rdx + CPUM.Host.r15]
push qword [rdx + CPUM.Host.rflags]     ; consumed by a popf not shown
; Host debug register values (presumably written to drN by code not shown).
mov rax, [rdx + CPUM.Host.dr0]
mov rbx, [rdx + CPUM.Host.dr1]
mov rcx, [rdx + CPUM.Host.dr2]
mov rax, [rdx + CPUM.Host.dr3]
mov rbx, [rdx + CPUM.Host.dr6]
mov rcx, [rdx + CPUM.Host.dr7]
; Human-readable switcher description (referenced via NAME(Description) below).
db "AMD64 to/from PAE", 0
; VMMSWITCHERDEF structure describing this switcher to VMMR3.  These are
; NASM istruc-style 'at' field initializers; the surrounding istruc/iend
; wrappers are not in this excerpt.  Offsets are all relative to
; NAME(Start) so the blob is position-independent for the relocator.
at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
at VMMSWITCHERDEF.enmType, dd VMMSWITCHER_AMD64_TO_PAE
at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
; Entry-point offsets.
at VMMSWITCHERDEF.offR0HostToGuest, dd NAME(vmmR0HostToGuest) - NAME(Start)
at VMMSWITCHERDEF.offGCGuestToHost, dd NAME(vmmGCGuestToHost) - NAME(Start)
at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx)- NAME(Start)
at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx)- NAME(Start)
; Region extents: HC = host-context code, ID = identity-mapped transition
; code, GC = guest-context code.
at VMMSWITCHERDEF.offHCCode0, dd 0
at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
at VMMSWITCHERDEF.offHCCode1, dd NAME(HCExitTarget) - NAME(Start)
at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(HCExitTarget)
at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
at VMMSWITCHERDEF.cbIDCode0, dd NAME(JmpGCTarget) - NAME(IDEnterTarget)
at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
at VMMSWITCHERDEF.cbIDCode1, dd NAME(HCExitTarget) - NAME(IDExitTarget)
at VMMSWITCHERDEF.offGCCode, dd NAME(JmpGCTarget) - NAME(Start)
at VMMSWITCHERDEF.cbGCCode, dd NAME(IDExitTarget) - NAME(JmpGCTarget)