; AMD64andLegacy.mac, revision 4a25fdc1810f28c7813f2fd13ab04ce25b60f30b
; VMM - World Switchers, template for AMD64 to PAE and 32-bit.
; Copyright (C) 2006-2007 Sun Microsystems, Inc.
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
; Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
; Clara, CA 95054 USA or visit http://www.sun.com if you need
; additional information or have any questions.
;%define DEBUG_STUFF 1
;%define STRICT_IF 1
;*******************************************************************************
;* Header Files *
;*******************************************************************************
; Start the fixup records
lea rdx, [r9 + VM.cpum]
lea r8, [r9 + VM.StatSwitcherToGC]
lea r8, [r9 + VM.StatSwitcherToGC]
FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
; Thunk to/from 64 bit when invoking the worker routine.
FIXUP FIX_HC_VM_OFF, 1, VM.cpum
FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
mov r8, [rdx + CPUM.ulOffCPUMCPU]
; mov [rdx + r8 + CPUMCPU.Host.rax], rax - scratch
mov [rdx + r8 + CPUMCPU.Host.rbx], rbx
; mov [rdx + r8 + CPUMCPU.Host.rcx], rcx - scratch
; mov [rdx + r8 + CPUMCPU.Host.rdx], rdx - scratch
mov [rdx + r8 + CPUMCPU.Host.rdi], rdi
mov [rdx + r8 + CPUMCPU.Host.rsi], rsi
mov [rdx + r8 + CPUMCPU.Host.rsp], rsp
mov [rdx + r8 + CPUMCPU.Host.rbp], rbp
; mov [rdx + r8 + CPUMCPU.Host.r8 ], r8 - scratch
; mov [rdx + r8 + CPUMCPU.Host.r9 ], r9 - scratch
mov [rdx + r8 + CPUMCPU.Host.r10], r10
mov [rdx + r8 + CPUMCPU.Host.r11], r11
mov [rdx + r8 + CPUMCPU.Host.r12], r12
mov [rdx + r8 + CPUMCPU.Host.r13], r13
mov [rdx + r8 + CPUMCPU.Host.r14], r14
mov [rdx + r8 + CPUMCPU.Host.r15], r15
mov [rdx + r8 + CPUMCPU.Host.ds], ds
mov [rdx + r8 + CPUMCPU.Host.es], es
mov [rdx + r8 + CPUMCPU.Host.fs], fs
mov [rdx + r8 + CPUMCPU.Host.gs], gs
mov [rdx + r8 + CPUMCPU.Host.ss], ss
mov [rbx + r8 + CPUMCPU.Host.FSbase], eax
mov [rbx + r8 + CPUMCPU.Host.FSbase + 4], edx
mov [rbx + r8 + CPUMCPU.Host.GSbase], eax
mov [rbx + r8 + CPUMCPU.Host.GSbase + 4], edx
mov [rbx + r8 + CPUMCPU.Host.efer], eax
mov [rbx + r8 + CPUMCPU.Host.efer + 4], edx
sldt [rdx + r8 + CPUMCPU.Host.ldtr]
sidt [rdx + r8 + CPUMCPU.Host.idtr]
sgdt [rdx + r8 + CPUMCPU.Host.gdtr]
str [rdx + r8 + CPUMCPU.Host.tr] ; yasm BUG, generates sldt. YASMCHECK!
pop qword [rdx + r8 + CPUMCPU.Host.rflags]
FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs], eax
mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4], edx
mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
and esi, ~CPUM_USED_FPU ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
mov [rdx + r8 + CPUMCPU.fUseFlags], esi
mov [rdx + r8 + CPUMCPU.Host.cr0], rax
;mov [rdx + r8 + CPUMCPU.Host.cr2], rax
mov [rdx + r8 + CPUMCPU.Host.cr3], rax
mov [rdx + r8 + CPUMCPU.Host.cr4], rax
; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
mov ecx, [rdx + r8 + CPUMCPU.Guest.cr4]
; in CPUMCPU.Hyper.cr4 (which isn't currently being used). That should
and ecx, [rdx + CPUM.CR4.AndMask]
or eax, [rdx + CPUM.CR4.OrMask]
mov eax, [rdx + r8 + CPUMCPU.Guest.cr0]
lgdt [rdx + r8 + CPUMCPU.Hyper.gdtr]
mov ebp, [rdx + r8 + CPUMCPU.Hyper.cr3]
mov [rdx + r8 + CPUMCPU.Host.dr7], rax
mov [rdx + r8 + CPUMCPU.Host.dr6], rax
mov [rdx + r8 + CPUMCPU.Host.dr0], rax
mov [rdx + r8 + CPUMCPU.Host.dr1], rbx
mov [rdx + r8 + CPUMCPU.Host.dr2], rcx
mov [rdx + r8 + CPUMCPU.Host.dr3], rax
lidt [edx + CPUMCPU.Hyper.idtr]
mov eax, [edx + CPUMCPU.Hyper.esp]
mov [edx + CPUMCPU.Hyper.lss_esp], eax
lss esp, [edx + CPUMCPU.Hyper.lss_esp]
ltr word [edx + CPUMCPU.Hyper.tr]
lldt [edx + CPUMCPU.Hyper.ldtr]
mov esi, [edx + CPUMCPU.fUseFlags]
mov ebx, [edx + CPUMCPU.Hyper.ebx]
mov ebp, [edx + CPUMCPU.Hyper.ebp]
mov esi, [edx + CPUMCPU.Hyper.esi]
mov edi, [edx + CPUMCPU.Hyper.edi]
push dword [edx + CPUMCPU.Hyper.eflags]
mov eax, [edx + CPUMCPU.Hyper.eip]
mov eax, [edx + CPUMCPU.Hyper.eip]
FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
mov ebx, [edx + CPUMCPU.Hyper.dr]
mov ecx, [edx + CPUMCPU.Hyper.dr + 8*1]
mov eax, [edx + CPUMCPU.Hyper.dr + 8*2]
mov ebx, [edx + CPUMCPU.Hyper.dr + 8*3]
;mov eax, [edx + CPUMCPU.Hyper.dr + 8*6]
mov eax, [edx + CPUMCPU.Hyper.dr + 8*7]
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
mov eax, [esp + 4 + CPUMCTXCORE.edi]
mov [edx + CPUMCPU.Guest.edi], eax
mov eax, [esp + 4 + CPUMCTXCORE.esi]
mov [edx + CPUMCPU.Guest.esi], eax
mov eax, [esp + 4 + CPUMCTXCORE.ebp]
mov [edx + CPUMCPU.Guest.ebp], eax
mov eax, [esp + 4 + CPUMCTXCORE.eax]
mov [edx + CPUMCPU.Guest.eax], eax
mov eax, [esp + 4 + CPUMCTXCORE.ebx]
mov [edx + CPUMCPU.Guest.ebx], eax
mov eax, [esp + 4 + CPUMCTXCORE.edx]
mov [edx + CPUMCPU.Guest.edx], eax
mov eax, [esp + 4 + CPUMCTXCORE.ecx]
mov [edx + CPUMCPU.Guest.ecx], eax
mov eax, [esp + 4 + CPUMCTXCORE.esp]
mov [edx + CPUMCPU.Guest.esp], eax
mov eax, [esp + 4 + CPUMCTXCORE.ss]
mov [edx + CPUMCPU.Guest.ss], eax
mov eax, [esp + 4 + CPUMCTXCORE.gs]
mov [edx + CPUMCPU.Guest.gs], eax
mov eax, [esp + 4 + CPUMCTXCORE.fs]
mov [edx + CPUMCPU.Guest.fs], eax
mov eax, [esp + 4 + CPUMCTXCORE.es]
mov [edx + CPUMCPU.Guest.es], eax
mov eax, [esp + 4 + CPUMCTXCORE.ds]
mov [edx + CPUMCPU.Guest.ds], eax
mov eax, [esp + 4 + CPUMCTXCORE.cs]
mov [edx + CPUMCPU.Guest.cs], eax
mov eax, [esp + 4 + CPUMCTXCORE.eflags]
mov [edx + CPUMCPU.Guest.eflags], eax
mov eax, [esp + 4 + CPUMCTXCORE.eip]
mov [edx + CPUMCPU.Guest.eip], eax
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
mov eax, [ecx + CPUMCTXCORE.edi]
mov [edx + CPUMCPU.Hyper.edi], eax
mov eax, [ecx + CPUMCTXCORE.esi]
mov [edx + CPUMCPU.Hyper.esi], eax
mov eax, [ecx + CPUMCTXCORE.ebp]
mov [edx + CPUMCPU.Hyper.ebp], eax
mov eax, [ecx + CPUMCTXCORE.eax]
mov [edx + CPUMCPU.Hyper.eax], eax
mov eax, [ecx + CPUMCTXCORE.ebx]
mov [edx + CPUMCPU.Hyper.ebx], eax
mov eax, [ecx + CPUMCTXCORE.edx]
mov [edx + CPUMCPU.Hyper.edx], eax
mov eax, [ecx + CPUMCTXCORE.ecx]
mov [edx + CPUMCPU.Hyper.ecx], eax
mov eax, [ecx + CPUMCTXCORE.esp]
mov [edx + CPUMCPU.Hyper.esp], eax
mov eax, [ecx + CPUMCTXCORE.ss]
mov [edx + CPUMCPU.Hyper.ss], eax
mov eax, [ecx + CPUMCTXCORE.gs]
mov [edx + CPUMCPU.Hyper.gs], eax
mov eax, [ecx + CPUMCTXCORE.fs]
mov [edx + CPUMCPU.Hyper.fs], eax
mov eax, [ecx + CPUMCTXCORE.es]
mov [edx + CPUMCPU.Hyper.es], eax
mov eax, [ecx + CPUMCTXCORE.ds]
mov [edx + CPUMCPU.Hyper.ds], eax
mov eax, [ecx + CPUMCTXCORE.cs]
mov [edx + CPUMCPU.Hyper.cs], eax
mov eax, [ecx + CPUMCTXCORE.eflags]
mov [edx + CPUMCPU.Hyper.eflags], eax
mov eax, [ecx + CPUMCTXCORE.eip]
mov [edx + CPUMCPU.Hyper.eip], eax
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
pop dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
mov dword [edx + CPUMCPU.Hyper.ebx], ebx
mov dword [edx + CPUMCPU.Hyper.esi], esi
mov dword [edx + CPUMCPU.Hyper.edi], edi
mov dword [edx + CPUMCPU.Hyper.ebp], ebp
mov dword [edx + CPUMCPU.Hyper.esp], esp
; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
sldt [edx + CPUMCPU.Hyper.ldtr]
; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
mov rsi, [rdx + CPUMCPU.Host.cr3]
mov r8, [rdx + CPUM.ulOffCPUMCPU]
lgdt [rdx + r8 + CPUMCPU.Host.gdtr]
lidt [rdx + r8 + CPUMCPU.Host.idtr]
movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
ltr word [rdx + r8 + CPUMCPU.Host.tr]
movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
ltr word [rdx + r8 + CPUMCPU.Host.tr]
lldt [rdx + r8 + CPUMCPU.Host.ldtr]
mov eax, [rdx + r8 + CPUMCPU.Host.ds]
mov eax, [rdx + r8 + CPUMCPU.Host.es]
mov eax, [rdx + r8 + CPUMCPU.Host.fs]
mov eax, [rdx + r8 + CPUMCPU.Host.gs]
mov eax, [rdx + r8 + CPUMCPU.Host.ss]
mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]
FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
mov eax, [rbx + r8 + CPUMCPU.Host.SysEnter.cs]
mov edx, [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4]
mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
fxsave [rdx + r8 + CPUMCPU.Guest.fpu]
fxrstor [rdx + r8 + CPUMCPU.Host.fpu]
mov rcx, [rdx + r8 + CPUMCPU.Host.cr4]
mov rcx, [rdx + r8 + CPUMCPU.Host.cr0]
;mov rcx, [rdx + r8 + CPUMCPU.Host.cr2] ; assumes this is waste of time.
mov eax, [rbx + r8 + CPUMCPU.Host.FSbase]
mov edx, [rbx + r8 + CPUMCPU.Host.FSbase + 4]
mov eax, [rbx + r8 + CPUMCPU.Host.GSbase]
mov edx, [rbx + r8 + CPUMCPU.Host.GSbase + 4]
mov eax, [rbx + r8 + CPUMCPU.Host.efer]
mov edx, [rbx + r8 + CPUMCPU.Host.efer + 4]
; mov rax, [rdx + r8 + CPUMCPU.Host.rax] - scratch + return code
mov rbx, [rdx + r8 + CPUMCPU.Host.rbx]
; mov rcx, [rdx + r8 + CPUMCPU.Host.rcx] - scratch
; mov rdx, [rdx + r8 + CPUMCPU.Host.rdx] - scratch
mov rdi, [rdx + r8 + CPUMCPU.Host.rdi]
mov rsi, [rdx + r8 + CPUMCPU.Host.rsi]
mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]
mov rbp, [rdx + r8 + CPUMCPU.Host.rbp]
; mov r8, [rdx + r8 + CPUMCPU.Host.r8 ] - scratch
; mov r9, [rdx + r8 + CPUMCPU.Host.r9 ] - scratch
mov r10, [rdx + r8 + CPUMCPU.Host.r10]
mov r11, [rdx + r8 + CPUMCPU.Host.r11]
mov r12, [rdx + r8 + CPUMCPU.Host.r12]
mov r13, [rdx + r8 + CPUMCPU.Host.r13]
mov r14, [rdx + r8 + CPUMCPU.Host.r14]
mov r15, [rdx + r8 + CPUMCPU.Host.r15]
push qword [rdx + r8 + CPUMCPU.Host.rflags]
mov rax, [rdx + r8 + CPUMCPU.Host.dr0]
mov rbx, [rdx + r8 + CPUMCPU.Host.dr1]
mov rcx, [rdx + r8 + CPUMCPU.Host.dr2]
mov rax, [rdx + r8 + CPUMCPU.Host.dr3]
mov rbx, [rdx + r8 + CPUMCPU.Host.dr6]
mov rcx, [rdx + r8 + CPUMCPU.Host.dr7]
at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
at VMMSWITCHERDEF.offR0HostToGuest, dd NAME(vmmR0HostToGuest) - NAME(Start)
at VMMSWITCHERDEF.offGCGuestToHost, dd NAME(vmmGCGuestToHost) - NAME(Start)
at VMMSWITCHERDEF.offGCCallTrampoline, dd NAME(vmmGCCallTrampoline) - NAME(Start)
at VMMSWITCHERDEF.offGCGuestToHostAsm, dd NAME(VMMGCGuestToHostAsm) - NAME(Start)
at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx, dd NAME(VMMGCGuestToHostAsmHyperCtx)- NAME(Start)
at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx, dd NAME(VMMGCGuestToHostAsmGuestCtx)- NAME(Start)
at VMMSWITCHERDEF.offHCCode0, dd 0
at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
at VMMSWITCHERDEF.offHCCode1, dd NAME(HCExitTarget) - NAME(Start)
at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(HCExitTarget)
at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
at VMMSWITCHERDEF.cbIDCode0, dd NAME(JmpGCTarget) - NAME(IDEnterTarget)
at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
at VMMSWITCHERDEF.cbIDCode1, dd NAME(HCExitTarget) - NAME(IDExitTarget)
at VMMSWITCHERDEF.offGCCode, dd NAME(JmpGCTarget) - NAME(Start)
at VMMSWITCHERDEF.cbGCCode, dd NAME(IDExitTarget) - NAME(JmpGCTarget)