; LegacyandAMD64.mac, revision bdb7c0518c285ee49aab8b2b6592f01f0a2208d0
;; @file
; VMM - World Switchers, 32-bit to AMD64 intermediate context.
;
; This is used for running 64-bit guests on 32-bit hosts, not
; normal raw-mode. All the code involved is contained in this
; file.
;

;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution.
;
;; @note These values are from the HM64ON32OP enum in hm.h.
%include "VBox/asmdefs.mac"
%include "iprt/x86.mac"
%include "VBox/err.mac"
%include "VBox/apic.mac"
%include "VBox/vmm/cpum.mac"
%include "VBox/vmm/stam.mac"
%include "VBox/vmm/vm.mac"
%include "VBox/vmm/hm_vmx.mac"
%include "CPUMInternal.mac"
%include "HMInternal.mac"
%include "VMMSwitcher.mac"
FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
mov ecx, [edx + CPUM.fApicDisVectors]
mov edx, [edx + CPUM.pvApicBase]
FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
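; Builds with VBOX_WITH_CRASHDUMP_MAGIC track progress by writing a step
; number into Guest.dr[4] (otherwise unused, since DR4 aliases DR6); a crash
; dump then shows how far the world switch got.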
mov eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
mov dword [edx + CPUMCPU.Guest.dr + 4*8], 1
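; Save the host context so the exit path can restore it.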
mov [edx + CPUMCPU.Host.ebx], ebx
mov [edx + CPUMCPU.Host.edi], edi
mov [edx + CPUMCPU.Host.esi], esi
mov [edx + CPUMCPU.Host.esp], esp
mov [edx + CPUMCPU.Host.ebp], ebp
mov [edx + CPUMCPU.Host.ds], ds
mov [edx + CPUMCPU.Host.es], es
mov [edx + CPUMCPU.Host.fs], fs
mov [edx + CPUMCPU.Host.gs], gs
mov [edx + CPUMCPU.Host.ss], ss
sldt [edx + CPUMCPU.Host.ldtr]
sidt [edx + CPUMCPU.Host.idtr]
sgdt [edx + CPUMCPU.Host.gdtr]
str [edx + CPUMCPU.Host.tr]
mov dword [edx + CPUMCPU.Guest.dr + 4*8], 2
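; Mask the local APIC LVT entries (LINT0/LINT1 etc.) that are configured as
; NMI or ExtINT while we are in the intermediate context; edi collects which
; ones were masked so fApicDisVectors lets the exit path unmask exactly those.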
mov ebx, [edx + CPUM.pvApicBase]
mov [edx + CPUM.fApicDisVectors], edi
mov eax, cr0
mov [edx + CPUMCPU.Host.cr0], eax
mov eax, cr3
mov [edx + CPUMCPU.Host.cr3], eax
mov eax, cr4
mov [edx + CPUMCPU.Host.cr4], eax
mov ebx, edx                        ; rdmsr returns in edx:eax; park the CPUMCPU pointer in ebx.
mov ecx, MSR_K6_EFER
rdmsr
mov [ebx + CPUMCPU.Host.efer], eax
mov [ebx + CPUMCPU.Host.efer + 4], edx
mov edx, ebx                        ; Restore the CPUMCPU pointer.
mov dword [edx + CPUMCPU.Guest.dr + 4*8], 3
lgdt [edx + CPUMCPU.Hyper.gdtr]
mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4
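; While FFXSR would speed up fxsave/fxrstor, it would leave the XMM registers
; out of the image, so it is masked out of the EFER value used below.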
and eax, ~(MSR_K6_EFER_FFXSR) ; turn off fast fxsave/fxrstor (skipping xmm regs)
mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
mov eax, [rdx + CPUMCPU.Hyper.ss.Sel]
mov ss, ax                          ; assumed: the hypervisor SS must be live before esp is switched.
mov esp, [rdx + CPUMCPU.Hyper.esp]
mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
mov r9, [rdx + CPUMCPU.Hyper.eip]
mov esi, [rdx + CPUMCPU.fUseFlags] ; esi == use flags.
mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
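; Load the guest FPU/SSE state when CPUM_SYNC_FPU_STATE is set in esi.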
fxrstor [rdx + CPUMCPU.Guest.fpu]
and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE
mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
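; Likewise load the guest debug registers when CPUM_SYNC_DEBUG_STATE is set.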
mov rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
mov dr0, rax
mov rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
mov dr1, rax
mov rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
mov dr2, rax
mov rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
mov dr3, rax
mov rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
mov dr6, rax
and dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_STATE
mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
lea rsi, [rdx + CPUMCPU.Guest.fpu]
mov dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
mov dword [rdx + CPUMCPU.u32RetCode], eax
; HM code (used to be HMRCA.asm at one point).
;;
; Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode).
; Flush the VMCS write cache first (before any other vmreads/vmwrites!)
mov qword [rbx + VMCSCACHE.uPos], 2
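; Debug-build bookkeeping: record the VM-entry test inputs in the cache;
; rax holds the relevant physical address (HCPhysCpuPage, then HCPhysVmcs)
; at each of the two stores below.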
mov [rbx + VMCSCACHE.TestIn.HCPhysCpuPage], rax
mov [rbx + VMCSCACHE.TestIn.HCPhysVmcs], rax
mov [rbx + VMCSCACHE.TestIn.pCache], rbx
mov [rbx + VMCSCACHE.TestIn.pCtx], rsi
mov ecx, [rbx + VMCSCACHE.Write.cValidEntries]
test ecx, ecx
jz .no_cached_writes
.cached_write:
dec rcx
mov eax, [rbx + VMCSCACHE.Write.aField + rcx*4]
vmwrite rax, qword [rbx + VMCSCACHE.Write.aFieldVal + rcx*8]
cmp rcx, 0                          ; vmwrite clobbers RFLAGS, hence the explicit cmp.
jnz .cached_write
mov dword [rbx + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:
mov qword [rbx + VMCSCACHE.uPos], 3
mov qword [rbx + VMCSCACHE.uPos], 4
%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
; Load the full guest MSR state.
LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%else
; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif
mov qword [rbx + VMCSCACHE.uPos], 5
mov rbx, qword [rsi + CPUMCTX.cr2]
mov cr2, rbx                        ; Restore the guest CR2.
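; Restore the guest general purpose registers; rsi is done last since it
; holds the guest context pointer until then.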
mov rax, qword [rsi + CPUMCTX.eax]
mov rbx, qword [rsi + CPUMCTX.ebx]
mov rcx, qword [rsi + CPUMCTX.ecx]
mov rdx, qword [rsi + CPUMCTX.edx]
mov rbp, qword [rsi + CPUMCTX.ebp]
mov r8, qword [rsi + CPUMCTX.r8]
mov r9, qword [rsi + CPUMCTX.r9]
mov r10, qword [rsi + CPUMCTX.r10]
mov r11, qword [rsi + CPUMCTX.r11]
mov r12, qword [rsi + CPUMCTX.r12]
mov r13, qword [rsi + CPUMCTX.r13]
mov r14, qword [rsi + CPUMCTX.r14]
mov r15, qword [rsi + CPUMCTX.r15]
mov rdi, qword [rsi + CPUMCTX.edi]
mov rsi, qword [rsi + CPUMCTX.esi]
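; (VMLAUNCH/VMRESUME and the VM-exit landing pad sit here.)
; On VM-exit: rdi points to the guest context again (the guest's own rdi
; was pushed on the stack first); save the guest registers back into it.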
mov qword [rdi + CPUMCTX.eax], rax
mov qword [rdi + CPUMCTX.ebx], rbx
mov qword [rdi + CPUMCTX.ecx], rcx
mov qword [rdi + CPUMCTX.edx], rdx
mov qword [rdi + CPUMCTX.esi], rsi
mov qword [rdi + CPUMCTX.ebp], rbp
mov qword [rdi + CPUMCTX.r8], r8
mov qword [rdi + CPUMCTX.r9], r9
mov qword [rdi + CPUMCTX.r10], r10
mov qword [rdi + CPUMCTX.r11], r11
mov qword [rdi + CPUMCTX.r12], r12
mov qword [rdi + CPUMCTX.r13], r13
mov qword [rdi + CPUMCTX.r14], r14
mov qword [rdi + CPUMCTX.r15], r15
mov rax, cr2
mov qword [rdi + CPUMCTX.cr2], rax
pop rax                             ; The guest edi we pushed above.
mov qword [rdi + CPUMCTX.edi], rax
%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
; Save the guest MSR state.
SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
SAVEGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
SAVEGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
SAVEGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%else
; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif
mov dword [rdi + VMCSCACHE.uPos], 7
mov [rdi + VMCSCACHE.TestOut.pCache], rdi
mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
mov rax, cr8
mov [rdi + VMCSCACHE.TestOut.cr8], rax
; Save the read cache: vmread each cached field back into the cache.
mov ecx, [rdi + VMCSCACHE.Read.cValidEntries]
test ecx, ecx
jz .no_cached_reads
.cached_read:
dec rcx
mov eax, [rdi + VMCSCACHE.Read.aField + rcx*4]
vmread qword [rdi + VMCSCACHE.Read.aFieldVal + rcx*8], rax
cmp rcx, 0                          ; vmread clobbers RFLAGS, hence the explicit cmp.
jnz .cached_read
.no_cached_reads:
mov rax, cr2
mov [rdi + VMCSCACHE.cr2], rax
mov dword [rdi + VMCSCACHE.uPos], 8
mov dword [rdi + VMCSCACHE.uPos], 9
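; VM-entry failure paths (invalid VMXON pointer vs. VMLAUNCH failure): each
; path tags uPos with its own value and records state for diagnosis.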
mov [rdi + VMCSCACHE.TestOut.HCPhysVmcs], rdx
mov [rdi + VMCSCACHE.TestOut.eflags], rdx
mov dword [rdi + VMCSCACHE.uPos], 12
mov dword [rdi + VMCSCACHE.uPos], 10
mov [rdi + VMCSCACHE.TestOut.pCache], rdi
mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
mov [rdi + VMCSCACHE.TestOut.pCache], rdi
mov [rdi + VMCSCACHE.TestOut.pCtx], rsi
mov dword [rdi + VMCSCACHE.uPos], 11
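
;
; The AMD-V (SVM) 64-bit VMRUN path: restore the guest GPRs (rax is left
; out; VMRUN loads guest RAX from the VMCB), run the guest, then store
; everything back below.
;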
mov rbx, qword [rsi + CPUMCTX.ebx]
mov rcx, qword [rsi + CPUMCTX.ecx]
mov rdx, qword [rsi + CPUMCTX.edx]
mov rdi, qword [rsi + CPUMCTX.edi]
mov rbp, qword [rsi + CPUMCTX.ebp]
mov r8, qword [rsi + CPUMCTX.r8]
mov r9, qword [rsi + CPUMCTX.r9]
mov r10, qword [rsi + CPUMCTX.r10]
mov r11, qword [rsi + CPUMCTX.r11]
mov r12, qword [rsi + CPUMCTX.r12]
mov r13, qword [rsi + CPUMCTX.r13]
mov r14, qword [rsi + CPUMCTX.r14]
mov r15, qword [rsi + CPUMCTX.r15]
mov rsi, qword [rsi + CPUMCTX.esi]
; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
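clgi
sti
; (The VMLOAD / VMRUN / VMSAVE sequence and the cli/stgi re-enable follow
; here; afterwards rax is assumed to hold the guest context pointer for
; the stores below.)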
mov qword [rax + CPUMCTX.ebx], rbx
mov qword [rax + CPUMCTX.ecx], rcx
mov qword [rax + CPUMCTX.edx], rdx
mov qword [rax + CPUMCTX.esi], rsi
mov qword [rax + CPUMCTX.edi], rdi
mov qword [rax + CPUMCTX.ebp], rbp
mov qword [rax + CPUMCTX.r8], r8
mov qword [rax + CPUMCTX.r9], r9
mov qword [rax + CPUMCTX.r10], r10
mov qword [rax + CPUMCTX.r11], r11
mov qword [rax + CPUMCTX.r12], r12
mov qword [rax + CPUMCTX.r13], r13
mov qword [rax + CPUMCTX.r14], r14
mov qword [rax + CPUMCTX.r15], r15
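; Helper routines for the 64-bit ops (see the HM64ON32OP note above): save
; the guest FPU/SSE and debug state; rsi holds the guest context pointer.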
fxsave [rsi + CPUMCTX.fpu]
mov rax, dr0
mov qword [rsi + CPUMCTX.dr + 0*8], rax
mov rax, dr1
mov qword [rsi + CPUMCTX.dr + 1*8], rax
mov rax, dr2
mov qword [rsi + CPUMCTX.dr + 2*8], rax
mov rax, dr3
mov qword [rsi + CPUMCTX.dr + 3*8], rax
mov rax, dr6
mov qword [rsi + CPUMCTX.dr + 6*8], rax
mov esi, [edx + CPUMCPU.Host.cr3]
mov cr3, esi                        ; Back into the host address space.
mov ebx, edx                        ; wrmsr consumes edx:eax; park the CPUMCPU pointer in ebx.
mov ecx, MSR_K6_EFER
mov eax, [ebx + CPUMCPU.Host.efer]
mov edx, [ebx + CPUMCPU.Host.efer + 4]
wrmsr
mov edx, ebx                        ; Restore the CPUMCPU pointer.
lgdt [edx + CPUMCPU.Host.gdtr]
lidt [edx + CPUMCPU.Host.idtr]
movzx eax, word [edx + CPUMCPU.Host.tr]          ; eax <- TR
and al, 0F8h                                     ; mask away TI and RPL bits, get descriptor offset.
add eax, [edx + CPUMCPU.Host.gdtr + 2]           ; eax <- GDTR.address + descriptor offset.
and dword [eax + 4], ~0200h                      ; clear busy flag (ltr on a busy TSS raises #GP).
ltr word [edx + CPUMCPU.Host.tr]
lldt [edx + CPUMCPU.Host.ldtr]
mov eax, [edx + CPUMCPU.Host.ds]
mov ds, eax
mov eax, [edx + CPUMCPU.Host.es]
mov es, eax
mov eax, [edx + CPUMCPU.Host.fs]
mov fs, eax
mov eax, [edx + CPUMCPU.Host.gs]
mov gs, eax
lss esp, [edx + CPUMCPU.Host.esp]
mov ecx, [edx + CPUMCPU.Host.cr4]
mov cr4, ecx
mov ecx, [edx + CPUMCPU.Host.cr0]
mov cr0, ecx
;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is a waste of time.
mov edi, [edx + CPUMCPU.Host.edi]
mov esi, [edx + CPUMCPU.Host.esi]
mov ebx, [edx + CPUMCPU.Host.ebx]
mov ebp, [edx + CPUMCPU.Host.ebp]
mov eax, [edx + CPUMCPU.u32RetCode]
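
;
; The switcher definition record, consumed by the VMMSwitcher code when
; installing and relocating this switcher: entry point offsets plus the
; host-context, identity-mapped and guest-context code ranges.
;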
at VMMSWITCHERDEF.pvCode, RTCCPTR_DEF NAME(Start)
at VMMSWITCHERDEF.pvFixups, RTCCPTR_DEF NAME(Fixups)
at VMMSWITCHERDEF.pszDesc, RTCCPTR_DEF NAME(Description)
at VMMSWITCHERDEF.pfnRelocate, RTCCPTR_DEF NAME(Relocate)
at VMMSWITCHERDEF.enmType, dd SWITCHER_TYPE
at VMMSWITCHERDEF.cbCode, dd NAME(End) - NAME(Start)
at VMMSWITCHERDEF.offR0ToRawMode, dd NAME(vmmR0ToRawMode) - NAME(Start)
at VMMSWITCHERDEF.offRCToHost, dd NAME(vmmRCToHost) - NAME(Start)
at VMMSWITCHERDEF.offRCCallTrampoline, dd NAME(vmmRCCallTrampoline) - NAME(Start)
at VMMSWITCHERDEF.offRCToHostAsm, dd NAME(vmmRCToHostAsm) - NAME(Start)
at VMMSWITCHERDEF.offRCToHostAsmNoReturn, dd NAME(vmmRCToHostAsmNoReturn) - NAME(Start)
at VMMSWITCHERDEF.offHCCode0, dd 0
at VMMSWITCHERDEF.cbHCCode0, dd NAME(IDEnterTarget) - NAME(Start)
at VMMSWITCHERDEF.offHCCode1, dd NAME(ICExitTarget) - NAME(Start)
at VMMSWITCHERDEF.cbHCCode1, dd NAME(End) - NAME(ICExitTarget)
at VMMSWITCHERDEF.offIDCode0, dd NAME(IDEnterTarget) - NAME(Start)
at VMMSWITCHERDEF.cbIDCode0, dd NAME(ICEnterTarget) - NAME(IDEnterTarget)
at VMMSWITCHERDEF.offIDCode1, dd NAME(IDExitTarget) - NAME(Start)
at VMMSWITCHERDEF.cbIDCode1, dd NAME(ICExitTarget) - NAME(IDExitTarget)
at VMMSWITCHERDEF.offGCCode, dd 0
at VMMSWITCHERDEF.cbGCCode, dd 0