; AMD64andLegacy.mac revision c626bd8465f241db74519c3c8dbe59ea620a9e34
; available from http://www.virtualbox.org. This file is free software;
%include "VBox/asmdefs.mac"
%include "VBox/apic.mac"
%include "iprt/x86.mac"
%include "VBox/vmm/cpum.mac"
%include "VBox/vmm/stam.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/err.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher.mac"
;-----------------------------------------------------------------------------
; Host-to-guest (ToGC) entry fragment.
; NOTE(review): this excerpt is a sampled extraction of the VirtualBox world
; switcher; labels, branches and interleaved instructions are missing.
; Comments describe only what the visible instructions do.
; Assumes r9 = pVM (host mapping) — TODO confirm against the full source.
;-----------------------------------------------------------------------------
lea rdx, [r9 + VM.cpum]                 ; rdx = &pVM->cpum (CPUM data)
lea r8, [r9 + VM.StatSwitcherToGC]      ; r8 = &pVM->StatSwitcherToGC (STAM counter)
mov r8d, [rdx + CPUM.offCPUMCPU0]       ; r8 = offset of CPU0's CPUMCPU within CPUM (overwrites stat ptr above; intervening use presumably dropped by extraction)
mov ecx, [rdx + r8 + CPUMCPU.fApicDisVectors] ; ecx = mask of APIC LVT vectors that were disabled
cmp byte [rdx + r8 + CPUMCPU.fX2Apic], 1 ; x2APIC mode? (conditional branch not visible in excerpt)
mov r8, [rdx + r8 + CPUMCPU.pvApicBase] ; r8 = mapped xAPIC MMIO base (clobbers the offset!)
lea r8, [r9 + VM.StatSwitcherToGC]      ; re-fetch the ToGC stat counter address
FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC ; relocation: patch preceding insn with host VM offset
; Thunk to/from 64 bit when invoking the worker routine.
; The FIXUP macros record relocations so the switcher image can be patched
; with the correct host-context VM structure offsets at load time.
FIXUP FIX_HC_VM_OFF, 1, VM.cpum         ; patch: host offset of pVM->cpum
FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC ; patch: host offset of the ToHC stat counter
mov r8d, [rdx + CPUM.offCPUMCPU0]       ; r8 = offset of CPU0's CPUMCPU within CPUM
;-----------------------------------------------------------------------------
; Save the host context into CPUMCPU.Host so it can be restored on the way
; back.  rdx = &CPUM, r8 = offset of CPUMCPU.  Registers marked "scratch"
; are volatile across the switch and deliberately not saved.
;-----------------------------------------------------------------------------
; mov [rdx + r8 + CPUMCPU.Host.rax], rax - scratch
mov [rdx + r8 + CPUMCPU.Host.rbx], rbx
; mov [rdx + r8 + CPUMCPU.Host.rcx], rcx - scratch
; mov [rdx + r8 + CPUMCPU.Host.rdx], rdx - scratch
mov [rdx + r8 + CPUMCPU.Host.rdi], rdi
mov [rdx + r8 + CPUMCPU.Host.rsi], rsi
mov [rdx + r8 + CPUMCPU.Host.rsp], rsp
mov [rdx + r8 + CPUMCPU.Host.rbp], rbp
; mov [rdx + r8 + CPUMCPU.Host.r8 ], r8 - scratch
; mov [rdx + r8 + CPUMCPU.Host.r9 ], r9 - scratch
mov [rdx + r8 + CPUMCPU.Host.r10], r10
mov [rdx + r8 + CPUMCPU.Host.r11], r11
mov [rdx + r8 + CPUMCPU.Host.r12], r12
mov [rdx + r8 + CPUMCPU.Host.r13], r13
mov [rdx + r8 + CPUMCPU.Host.r14], r14
mov [rdx + r8 + CPUMCPU.Host.r15], r15
; Host segment selectors.
mov [rdx + r8 + CPUMCPU.Host.ds], ds
mov [rdx + r8 + CPUMCPU.Host.es], es
mov [rdx + r8 + CPUMCPU.Host.fs], fs
mov [rdx + r8 + CPUMCPU.Host.gs], gs
mov [rdx + r8 + CPUMCPU.Host.ss], ss
; Store MSR values (FS base, GS base, EFER) as two 32-bit halves.
; NOTE(review): rbx (not rdx) is the base register here, presumably because
; the rdmsr instructions (edx:eax result, clobbering rdx) that precede each
; pair were dropped by the extraction — confirm against the full source.
mov [rbx + r8 + CPUMCPU.Host.FSbase], eax
mov [rbx + r8 + CPUMCPU.Host.FSbase + 4], edx
mov [rbx + r8 + CPUMCPU.Host.GSbase], eax
mov [rbx + r8 + CPUMCPU.Host.GSbase + 4], edx
mov [rbx + r8 + CPUMCPU.Host.efer], eax
mov [rbx + r8 + CPUMCPU.Host.efer + 4], edx
; Host descriptor tables and task register.
sldt [rdx + r8 + CPUMCPU.Host.ldtr]
sidt [rdx + r8 + CPUMCPU.Host.idtr]
sgdt [rdx + r8 + CPUMCPU.Host.gdtr]
str [rdx + r8 + CPUMCPU.Host.tr] ; yasm BUG, generates sldt. YASMCHECK!
pop qword [rdx + r8 + CPUMCPU.Host.rflags] ; rflags were pushed earlier (pushfq not visible in excerpt)
; APIC bookkeeping (the code disabling LVT vectors is not visible here).
cmp byte [rdx + r8 + CPUMCPU.fX2Apic], 1 ; x2APIC mode? branch target dropped by extraction
mov rbx, [rdx + r8 + CPUMCPU.pvApicBase] ; rbx = mapped xAPIC MMIO base
mov [rdx + r8 + CPUMCPU.fApicDisVectors], edi ; record which LVT vectors we masked
mov [rdx + r8 + CPUMCPU.fApicDisVectors], edi ; NOTE(review): duplicate of previous line — likely two code paths merged by the extraction
; Save host SYSENTER state (skipped entirely when the host doesn't use it).
FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
; edx:eax presumably holds the SYSENTER_CS MSR value from a dropped rdmsr
; (hence rbx as base, since rdmsr clobbers edx) — TODO confirm.
mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs], eax
mov [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4], edx
; Reset the FPU-usage tracking flags for this world switch.
mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
and esi, ~CPUM_USED_FPU ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
mov [rdx + r8 + CPUMCPU.fUseFlags], esi
; Save host control registers (the `mov rax, crN` reads are not visible in
; this excerpt).  CR2 is skipped: it's not worth preserving here.
mov [rdx + r8 + CPUMCPU.Host.cr0], rax
;mov [rdx + r8 + CPUMCPU.Host.cr2], rax
mov [rdx + r8 + CPUMCPU.Host.cr3], rax
mov [rdx + r8 + CPUMCPU.Host.cr4], rax
; Change CR0 and CR4 so we can correctly emulate FPU/MMX/SSE[23] exceptions
; Build the effective CR4 from the guest value masked/forced by CPUM policy.
; NOTE(review): `and` targets ecx but `or` targets eax — in the upstream
; source these operate on one register; an intervening move was probably
; dropped by the extraction.  Verify before relying on this sequence.
mov ecx, [rdx + r8 + CPUMCPU.Guest.cr4] ; ecx = guest CR4
; in CPUMCPU.Hyper.cr4 (which isn't currently being used). That should
and ecx, [rdx + CPUM.CR4.AndMask]       ; clear bits the hypervisor forbids
or eax, [rdx + CPUM.CR4.OrMask]         ; set bits the hypervisor requires
mov eax, [rdx + r8 + CPUMCPU.Guest.cr0] ; eax = guest CR0
; Load the hypervisor's GDT and page directory base for the switch.
lgdt [rdx + r8 + CPUMCPU.Hyper.gdtr]
mov ebp, [rdx + r8 + CPUMCPU.Hyper.cr3] ; ebp = hyper CR3 (loaded into cr3 later, not visible here)
; Save host debug registers and mark them as needing restore on the way back.
; NOTE(review): the `mov rax/rbx/rcx, drN` reads between these stores were
; dropped by the extraction — the stores look repetitive only for that reason.
mov [rdx + r8 + CPUMCPU.Host.dr7], rax
mov [rdx + r8 + CPUMCPU.Host.dr6], rax
mov [rdx + r8 + CPUMCPU.Host.dr0], rax
mov [rdx + r8 + CPUMCPU.Host.dr1], rbx
mov [rdx + r8 + CPUMCPU.Host.dr2], rcx
mov [rdx + r8 + CPUMCPU.Host.dr3], rax
or dword [rdx + r8 + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HOST ; remember to restore host DRx on exit
;-----------------------------------------------------------------------------
; Load the hypervisor (raw-mode) context and enter it via iret.
; Note the switch to 32-bit [edx + ...] addressing: this fragment runs on the
; legacy/identity-mapped side of the switcher where edx = &CPUMCPU.
;-----------------------------------------------------------------------------
lidt [edx + CPUMCPU.Hyper.idtr]         ; hypervisor IDT
mov ax, [edx + CPUMCPU.Hyper.ss.Sel]    ; hypervisor stack selector (loaded into ss in a dropped line, presumably)
mov esp, [edx + CPUMCPU.Hyper.esp]      ; hypervisor stack pointer
ltr word [edx + CPUMCPU.Hyper.tr.Sel]   ; hypervisor task register
lldt [edx + CPUMCPU.Hyper.ldtr.Sel]     ; hypervisor LDT
mov esi, [edx + CPUMCPU.fUseFlags]      ; esi = use flags (tested by dropped code)
; General purpose registers of the hypervisor context.
mov eax, [edx + CPUMCPU.Hyper.eax]
mov ebx, [edx + CPUMCPU.Hyper.ebx]
mov ecx, [edx + CPUMCPU.Hyper.ecx]
mov ebp, [edx + CPUMCPU.Hyper.ebp]
mov esi, [edx + CPUMCPU.Hyper.esi]
mov edi, [edx + CPUMCPU.Hyper.edi]
; Build an iret frame: eflags and eip (cs push not visible in excerpt).
push dword [edx + CPUMCPU.Hyper.eflags]
push dword [edx + CPUMCPU.Hyper.eip]
mov edx, [edx + CPUMCPU.Hyper.edx] ; !! edx is no longer pointing to CPUMCPU here !!
FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC ; relocation: guest-context VM offset for stat counter
iret ; Use iret to make debugging and TF/RF work.
; Load hypervisor debug registers DR0-DR3 and DR7 (Hyper.dr is an array of
; 8-byte entries; index 7 = DR7).  NOTE(review): the `mov drN, reg` writes
; between these loads were dropped by the extraction, which is why ebx/eax
; appear to be overwritten without use.
mov ebx, [edx + CPUMCPU.Hyper.dr]       ; DR0
mov ecx, [edx + CPUMCPU.Hyper.dr + 8*1] ; DR1
mov eax, [edx + CPUMCPU.Hyper.dr + 8*2] ; DR2
mov ebx, [edx + CPUMCPU.Hyper.dr + 8*3] ; DR3
mov eax, [edx + CPUMCPU.Hyper.dr + 8*7] ; DR7
or dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER ; hyper DRx now live; restore on exit
; i.e. traps in the hypervisor code. This will not return and saves no
; Guest-context relocations for the various STAM counters touched on the
; RC-to-host path.  The two identical triples presumably belong to the
; vmmRCToHostAsm and vmmRCToHostAsmNoReturn variants — confirm in full source.
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
;-----------------------------------------------------------------------------
; RC-to-host path: save the hypervisor context back into CPUMCPU.Hyper.
; edx = &CPUMCPU (guest-context mapping); edx itself and eip/return address
; were pushed on the stack by the caller (pushes not visible in excerpt).
;-----------------------------------------------------------------------------
pop dword [edx + CPUMCPU.Hyper.edx]     ; restore caller's edx straight into the save area
pop dword [edx + CPUMCPU.Hyper.eip] ; call return from stack
mov dword [edx + CPUMCPU.Hyper.esp], esp
mov dword [edx + CPUMCPU.Hyper.eax], eax
mov dword [edx + CPUMCPU.Hyper.ebx], ebx
mov dword [edx + CPUMCPU.Hyper.ecx], ecx
mov dword [edx + CPUMCPU.Hyper.esi], esi
mov dword [edx + CPUMCPU.Hyper.edi], edi
mov dword [edx + CPUMCPU.Hyper.ebp], ebp
; str [edx + CPUMCPU.Hyper.tr] - double fault only, and it won't be right then either.
sldt [edx + CPUMCPU.Hyper.ldtr.Sel]     ; save hypervisor LDT selector
; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
test dword [edx + CPUMCPU.fUseFlags], CPUM_USED_DEBUG_REGS_HYPER ; hyper DRx loaded? (branch dropped by extraction)
;-----------------------------------------------------------------------------
; Back in 64-bit host context: restore descriptor tables, TR, LDT, segment
; registers, stack and SYSENTER state from CPUMCPU.Host.
;-----------------------------------------------------------------------------
mov rsi, [rdx + CPUMCPU.Host.cr3]       ; rsi = host CR3 (note: no r8 offset here — rdx presumably points at CPUMCPU directly; confirm)
mov r8d, [rdx + CPUM.offCPUMCPU0]       ; r8 = CPUMCPU offset (rdx = &CPUM again from here on)
lgdt [rdx + r8 + CPUMCPU.Host.gdtr]
lidt [rdx + r8 + CPUMCPU.Host.idtr]
; Restore TR.  A TSS's busy bit must be cleared in the GDT before ltr will
; accept it; the descriptor-fixup code between these repeats was dropped by
; the extraction (the pair likely spans a conditional).
movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
ltr word [rdx + r8 + CPUMCPU.Host.tr]
movzx eax, word [rdx + r8 + CPUMCPU.Host.tr] ; eax <- TR
ltr word [rdx + r8 + CPUMCPU.Host.tr]
lldt [rdx + r8 + CPUMCPU.Host.ldtr]
; Segment selectors (the `mov ds, eax` style writes are not visible here).
mov eax, [rdx + r8 + CPUMCPU.Host.ds]
mov eax, [rdx + r8 + CPUMCPU.Host.es]
mov eax, [rdx + r8 + CPUMCPU.Host.fs]
mov eax, [rdx + r8 + CPUMCPU.Host.gs]
mov eax, [rdx + r8 + CPUMCPU.Host.ss]
mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]  ; back on the host stack
; Restore SYSENTER_CS (skipped when host doesn't use sysenter).
FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
mov eax, [rbx + r8 + CPUMCPU.Host.SysEnter.cs] ; edx:eax = saved MSR value for a dropped wrmsr
mov edx, [rbx + r8 + CPUMCPU.Host.SysEnter.cs + 4]
mov esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
; Save the guest FPU state and restore the host's, then control registers
; and MSR bases.  (The `mov crN, reg` writes and rdmsr/wrmsr instructions
; interleaved with these lines were dropped by the extraction.)
fxsave [rdx + r8 + CPUMCPU.Guest.fpu]   ; stash guest FPU/SSE state
o64 fxrstor [rdx + r8 + CPUMCPU.Host.fpu] ; Restore 64-bit host FPU state. See @bugref{7138}
mov rcx, [rdx + r8 + CPUMCPU.Host.cr4]
mov rax, [rdx + r8 + CPUMCPU.Host.cr3]
mov rax, [rdx + r8 + CPUMCPU.Host.cr3]  ; NOTE(review): duplicate load — likely two merged paths in the extraction
mov rcx, [rdx + r8 + CPUMCPU.Host.cr0]
;mov rcx, [rdx + r8 + CPUMCPU.Host.cr2] ; assumes this is waste of time.
; MSR bases: edx:eax pairs destined for dropped wrmsr instructions (hence
; rbx as base — wrmsr consumes edx).
mov eax, [rbx + r8 + CPUMCPU.Host.FSbase]
mov edx, [rbx + r8 + CPUMCPU.Host.FSbase + 4]
mov eax, [rbx + r8 + CPUMCPU.Host.GSbase]
mov edx, [rbx + r8 + CPUMCPU.Host.GSbase + 4]
mov eax, [rbx + r8 + CPUMCPU.Host.efer]
mov edx, [rbx + r8 + CPUMCPU.Host.efer + 4]
; Restore debug registers (if modified). (ESI must still be fUseFlags! Must be done late, at least after CR4!)
and dword [rdx + r8 + CPUMCPU.fUseFlags], ~(CPUM_USED_DEBUG_REGS_HOST | CPUM_USED_DEBUG_REGS_HYPER) ; DRx bookkeeping done
; Restore host general purpose registers.  rax carries the return code;
; scratch registers were never saved.
; mov rax, [rdx + r8 + CPUMCPU.Host.rax] - scratch + return code
mov rbx, [rdx + r8 + CPUMCPU.Host.rbx]
; mov rcx, [rdx + r8 + CPUMCPU.Host.rcx] - scratch
; mov rdx, [rdx + r8 + CPUMCPU.Host.rdx] - scratch
mov rdi, [rdx + r8 + CPUMCPU.Host.rdi]
mov rsi, [rdx + r8 + CPUMCPU.Host.rsi]
mov rsp, [rdx + r8 + CPUMCPU.Host.rsp]
mov rbp, [rdx + r8 + CPUMCPU.Host.rbp]
; mov r8, [rdx + r8 + CPUMCPU.Host.r8 ] - scratch
; mov r9, [rdx + r8 + CPUMCPU.Host.r9 ] - scratch
mov r10, [rdx + r8 + CPUMCPU.Host.r10]
mov r11, [rdx + r8 + CPUMCPU.Host.r11]
mov r12, [rdx + r8 + CPUMCPU.Host.r12]
mov r13, [rdx + r8 + CPUMCPU.Host.r13]
mov r14, [rdx + r8 + CPUMCPU.Host.r14]
mov r15, [rdx + r8 + CPUMCPU.Host.r15]
push qword [rdx + r8 + CPUMCPU.Host.rflags] ; for a popfq further on (not visible in excerpt)
; Host debug registers (the `mov drN, reg` writes between the loads were
; dropped by the extraction, hence the apparent rax/rbx/rcx reuse).
mov rax, [rdx + r8 + CPUMCPU.Host.dr0]
mov rbx, [rdx + r8 + CPUMCPU.Host.dr1]
mov rcx, [rdx + r8 + CPUMCPU.Host.dr2]
mov rax, [rdx + r8 + CPUMCPU.Host.dr3]
mov rbx, [rdx + r8 + CPUMCPU.Host.dr6]
mov rcx, [rdx + r8 + CPUMCPU.Host.dr7]
; VMMSWITCHERDEF descriptor for this switcher (the enclosing istruc/iend
; lines are outside this excerpt).  All off*/cb* fields are byte offsets and
; sizes relative to NAME(Start), used by the loader to relocate and split the
; image into host-context (HC), identity-mapped (ID) and guest-context (GC)
; sections.
at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)           ; start of the switcher code blob
at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)        ; relocation records emitted by FIXUP
at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)    ; human readable name
at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)   ; custom relocation callback
at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE                 ; which switcher variant this is
at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)        ; total code size
at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)       ; host -> raw-mode entry
at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)             ; raw-mode -> host entry
at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
; Host-context code sections (before and after the identity-mapped part).
at VMMSWITCHERDEF.offHCCode0, dd 0
at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
at VMMSWITCHERDEF.offHCCode1, dd NAME(HCExitTarget) - NAME(Start)
at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(HCExitTarget)
; Identity-mapped code sections (entry and exit thunks).
at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
at VMMSWITCHERDEF.cbIDCode0, dd NAME(JmpGCTarget) - NAME(IDEnterTarget)
at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
at VMMSWITCHERDEF.cbIDCode1, dd NAME(HCExitTarget) - NAME(IDExitTarget)
; Guest-context code section.
at VMMSWITCHERDEF.offGCCode, dd NAME(JmpGCTarget) - NAME(Start)
at VMMSWITCHERDEF.cbGCCode, dd NAME(IDExitTarget) - NAME(JmpGCTarget)