; $Id$
;; @file
; HMR0Mixed.mac - Stuff that darwin needs to build two versions of.
;
; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;
;
; Copyright (C) 2006-2012 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
%ifndef VBOX_WITH_OLD_VTX_CODE
%ifdef RT_ARCH_AMD64
%define VMX_SKIP_GDTR_IDTR
%define VMX_SKIP_LDTR_TR
%endif
%endif
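; Note: With the new VT-x code on 64-bit hosts the GDTR/IDTR and LDTR/TR
;       save/restore below is skipped; presumably the C code takes care of
;       that there, so the defines above merely elide the assembly juggling.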
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume x86:[ebp+8], msc:rcx,gcc:rdi vmlaunch/vmresume
; * @param pCtx x86:[ebp+c], msc:rdx,gcc:rsi Guest context
; * @param pCache x86:[ebp+10],msc:r8, gcc:rdx VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
push xBP
mov xBP, xSP
pushf
cli
; Save all general purpose host registers.
MYPUSHAD
; First we have to save some final CPU context registers.
mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
lea r10, [.vmlaunch_done wrt rip]
vmwrite rax, r10
%else
mov ecx, .vmlaunch_done
vmwrite eax, ecx
%endif
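; Note: VMX_VMCS_HOST_RIP is the VMCS host-state RIP field; on every VM exit
;       the CPU resumes execution at this address, i.e. at .vmlaunch_done below.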
; Note: assumes success!
; Manual save and restore:
; - General purpose registers except RIP, RSP.
;
; Trashed:
; - CR2 (we don't care).
; - LDTR (reset to 0).
; - DRx (presumably not changed at all).
; - DR7 (reset to 0x400).
; - EFLAGS (reset to RT_BIT(1); not relevant).
; Save the Guest CPU context pointer.
%ifdef RT_ARCH_AMD64
%ifdef ASM_CALL64_GCC
; fResume already in rdi
; pCtx already in rsi
mov rbx, rdx ; pCache
%else
mov rdi, rcx ; fResume
mov rsi, rdx ; pCtx
mov rbx, r8 ; pCache
%endif
%else
mov edi, [ebp + 8] ; fResume
mov esi, [ebp + 12] ; pCtx
mov ebx, [ebp + 16] ; pCache
%endif
; Save segment registers.
; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
MYPUSHSEGS xAX, ax
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
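; Note: The write cache consists of two parallel arrays: aField[] holds the
;       32-bit VMCS field encodings (hence the *4 stride) and aFieldVal[] the
;       corresponding 64-bit values (hence the *8 stride).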
mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
cmp ecx, 0
je .no_cached_writes
mov edx, ecx
mov ecx, 0
jmp .cached_write
ALIGN(16)
.cached_write:
mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
inc xCX
cmp xCX, xDX
jl .cached_write
mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:
; Save the pCache pointer.
push xBX
%endif
; Save the pCtx pointer.
push xSI
%ifndef VMX_SKIP_LDTR_TR
; Save LDTR.
xor eax, eax
sldt ax
push xAX
; The TR limit is reset to 0x67; restore it manually.
str eax
push xAX
%endif
%ifndef VMX_SKIP_GDTR_IDTR
; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
sub xSP, xCB * 2
sgdt [xSP]
sub xSP, xCB * 2
sidt [xSP]
%endif
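; Note: At this point the stack holds, from the top down: the saved IDTR and
;       GDTR images (when not skipped), the saved TR and LDTR selectors (when
;       not skipped), the pCtx pointer and, if VMCS caching is used, the
;       pCache pointer. VMX_VMCS_HOST_RSP is written below, so the VM-exit
;       path finds exactly this layout again.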
; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
mov xBX, [xSI + CPUMCTX.cr2]
mov xDX, cr2
cmp xBX, xDX
je .skipcr2write32
mov cr2, xBX
.skipcr2write32:
mov eax, VMX_VMCS_HOST_RSP
vmwrite xAX, xSP
; Note: assumes success!
; Don't mess with ESP anymore!!!
; Load Guest's general purpose registers.
mov eax, [xSI + CPUMCTX.eax]
mov ebx, [xSI + CPUMCTX.ebx]
mov ecx, [xSI + CPUMCTX.ecx]
mov edx, [xSI + CPUMCTX.edx]
mov ebp, [xSI + CPUMCTX.ebp]
; Resume or start?
cmp xDI, 0 ; fResume
je .vmlaunch_launch
; Restore edi & esi.
mov edi, [xSI + CPUMCTX.edi]
mov esi, [xSI + CPUMCTX.esi]
vmresume
jmp .vmlaunch_done ; Here if vmresume detected a failure.
.vmlaunch_launch:
; Restore edi & esi.
mov edi, [xSI + CPUMCTX.edi]
mov esi, [xSI + CPUMCTX.esi]
vmlaunch
jmp .vmlaunch_done ; Here if vmlaunch detected a failure.
ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
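; Note: We arrive here in two ways: by falling through when VMLAUNCH/VMRESUME
;       fails (CF=1 for VMfailInvalid, ZF=1 for VMfailValid), or via the
;       VMX_VMCS_HOST_RIP value on a genuine VM exit, in which case the CPU
;       clears RFLAGS (except bit 1) so both conditional jumps fall through.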
jc near .vmxstart_invalid_vmcs_ptr
jz near .vmxstart_start_failed
; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_GDTR_IDTR
lidt [xSP]
add xSP, xCB * 2
lgdt [xSP]
add xSP, xCB * 2
%endif
push xDI
%ifndef VMX_SKIP_LDTR_TR
mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, LDTR + TR).
%else
mov xDI, [xSP + xCB] ; pCtx
%endif
mov [ss:xDI + CPUMCTX.eax], eax
mov [ss:xDI + CPUMCTX.ebx], ebx
mov [ss:xDI + CPUMCTX.ecx], ecx
mov [ss:xDI + CPUMCTX.edx], edx
mov [ss:xDI + CPUMCTX.esi], esi
mov [ss:xDI + CPUMCTX.ebp], ebp
%ifndef VBOX_WITH_OLD_VTX_CODE
mov xAX, cr2
mov [ss:xDI + CPUMCTX.cr2], xAX
%endif
%ifdef RT_ARCH_AMD64
pop xAX ; The guest edi we pushed above.
mov dword [ss:xDI + CPUMCTX.edi], eax
%else
pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
%endif
%ifndef VMX_SKIP_LDTR_TR
; Restore TSS selector; must mark it as not busy before using ltr (!)
; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
; @todo get rid of sgdt
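; Note: ltr faults on a TSS descriptor that is already marked busy, so the
;       busy bit (bit 9 of the dword at descriptor offset 4) is cleared first.
;       Masking the selector with 0F8h strips the RPL and TI bits, leaving the
;       byte offset of the descriptor within the GDT.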
pop xBX ; Saved TR
sub xSP, xCB * 2
sgdt [xSP]
mov xAX, xBX
and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
ltr bx
add xSP, xCB * 2
pop xAX ; Saved LDTR
lldt ax
%endif
add xSP, xCB ; pCtx
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
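; Note: Pull the queued VMCS read fields into the cache (same parallel-array
;       layout as the write cache above); presumably the ring-0 C code reads
;       them from there instead of issuing vmreads itself.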
pop xDX ; Saved pCache
mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
cmp ecx, 0 ; Can't happen
je .no_cached_reads
jmp .cached_read
ALIGN(16)
.cached_read:
dec xCX
mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
cmp xCX, 0
jnz .cached_read
.no_cached_reads:
%ifdef VBOX_WITH_OLD_VTX_CODE
; Restore CR2 into VMCS-cache field (for EPT).
mov xAX, cr2
mov [ss:xDX + VMCSCACHE.cr2], xAX
%endif
%endif
; Restore segment registers.
MYPOPSEGS xAX, ax
; Restore general purpose registers.
MYPOPAD
mov eax, VINF_SUCCESS
.vmstart_end:
popf
pop xBP
ret
.vmxstart_invalid_vmcs_ptr:
; Restore base and limit of the IDTR & GDTR
%ifndef VMX_SKIP_GDTR_IDTR
lidt [xSP]
add xSP, xCB * 2
lgdt [xSP]
add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_LDTR_TR
; Restore TSS selector; must mark it as not busy before using ltr (!)
; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
; @todo get rid of sgdt
pop xBX ; Saved TR
sub xSP, xCB * 2
sgdt [xSP]
mov xAX, xBX
and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
ltr bx
add xSP, xCB * 2
pop xAX ; Saved LDTR
lldt ax
%endif
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
add xSP, xCB * 2 ; pCtx + pCache
%else
add xSP, xCB ; pCtx
%endif
; Restore segment registers.
MYPOPSEGS xAX, ax
; Restore all general purpose host registers.
MYPOPAD
mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
jmp .vmstart_end
.vmxstart_start_failed:
; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_GDTR_IDTR
lidt [xSP]
add xSP, xCB * 2
lgdt [xSP]
add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_LDTR_TR
; Restore TSS selector; must mark it as not busy before using ltr (!)
; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
; @todo get rid of sgdt
pop xBX ; Saved TR
sub xSP, xCB * 2
sgdt [xSP]
mov xAX, xBX
and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
ltr bx
add xSP, xCB * 2
pop xAX ; Saved LDTR
lldt ax
%endif
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
add xSP, xCB * 2 ; pCtx + pCache
%else
add xSP, xCB ; pCtx
%endif
; Restore segment registers.
MYPOPSEGS xAX, ax
; Restore all general purpose host registers.
MYPOPAD
mov eax, VERR_VMX_UNABLE_TO_START_VM
jmp .vmstart_end
ENDPROC MY_NAME(VMXR0StartVM32)
%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param fResume msc:rcx, gcc:rdi vmlaunch/vmresume
; * @param pCtx msc:rdx, gcc:rsi Guest context
; * @param pCache msc:r8, gcc:rdx VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
push xBP
mov xBP, xSP
pushf
cli
; Save all general purpose host registers.
MYPUSHAD
; First we have to save some final CPU context registers.
lea r10, [.vmlaunch64_done wrt rip]
mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
vmwrite rax, r10
; Note: assumes success!
; Manual save and restore:
; - General purpose registers except RIP, RSP.
;
; Trashed:
; - CR2 (we don't care).
; - LDTR (reset to 0).
; - DRx (presumably not changed at all).
; - DR7 (reset to 0x400).
; - EFLAGS (reset to RT_BIT(1); not relevant).
; Save the Guest CPU context pointer.
%ifdef ASM_CALL64_GCC
; fResume already in rdi
; pCtx already in rsi
mov rbx, rdx ; pCache
%else
mov rdi, rcx ; fResume
mov rsi, rdx ; pCtx
mov rbx, r8 ; pCache
%endif
; Save segment registers.
; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
MYPUSHSEGS xAX, ax
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
cmp ecx, 0
je .no_cached_writes
mov edx, ecx
mov ecx, 0
jmp .cached_write
ALIGN(16)
.cached_write:
mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
inc xCX
cmp xCX, xDX
jl .cached_write
mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:
; Save the pCache pointer.
push xBX
%endif
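; Note: LOADGUESTMSR/LOADHOSTMSR(EX) are macros supplied by the including
;       file; the assumption here is that LOADGUESTMSR pushes the current host
;       MSR value before loading the guest one, which is why the LOADHOSTMSREX
;       sequence on the way out runs in the reverse order.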
%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
; Save the host MSRs and load the guest MSRs.
LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif
%endif
; Save the pCtx pointer.
push xSI
; Save LDTR.
xor eax, eax
sldt ax
push xAX
; The TR limit is reset to 0x67; restore it manually.
str eax
push xAX
; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
%ifndef VMX_SKIP_GDTR_IDTR
sub xSP, xCB * 2
sgdt [xSP]
sub xSP, xCB * 2
sidt [xSP]
%endif
; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
mov rbx, qword [xSI + CPUMCTX.cr2]
mov rdx, cr2
cmp rbx, rdx
je .skipcr2write
mov cr2, rbx
.skipcr2write:
mov eax, VMX_VMCS_HOST_RSP
vmwrite xAX, xSP
; Note: assumes success!
; Don't mess with ESP anymore!!!
; Restore Guest's general purpose registers.
mov rax, qword [xSI + CPUMCTX.eax]
mov rbx, qword [xSI + CPUMCTX.ebx]
mov rcx, qword [xSI + CPUMCTX.ecx]
mov rdx, qword [xSI + CPUMCTX.edx]
mov rbp, qword [xSI + CPUMCTX.ebp]
mov r8, qword [xSI + CPUMCTX.r8]
mov r9, qword [xSI + CPUMCTX.r9]
mov r10, qword [xSI + CPUMCTX.r10]
mov r11, qword [xSI + CPUMCTX.r11]
mov r12, qword [xSI + CPUMCTX.r12]
mov r13, qword [xSI + CPUMCTX.r13]
mov r14, qword [xSI + CPUMCTX.r14]
mov r15, qword [xSI + CPUMCTX.r15]
; Resume or start?
cmp xDI, 0 ; fResume
je .vmlaunch64_launch
; Restore rdi & rsi.
mov rdi, qword [xSI + CPUMCTX.edi]
mov rsi, qword [xSI + CPUMCTX.esi]
vmresume
jmp .vmlaunch64_done ; Here if vmresume detected a failure.
.vmlaunch64_launch:
; Restore rdi & rsi.
mov rdi, qword [xSI + CPUMCTX.edi]
mov rsi, qword [xSI + CPUMCTX.esi]
vmlaunch
jmp .vmlaunch64_done ; Here if vmlaunch detected a failure.
ALIGNCODE(16)
.vmlaunch64_done:
jc near .vmxstart64_invalid_vmcs_ptr
jz near .vmxstart64_start_failed
; Restore base and limit of the IDTR & GDTR
%ifndef VMX_SKIP_GDTR_IDTR
lidt [xSP]
add xSP, xCB * 2
lgdt [xSP]
add xSP, xCB * 2
%endif
push xDI
mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, LDTR + TR)
mov qword [xDI + CPUMCTX.eax], rax
mov qword [xDI + CPUMCTX.ebx], rbx
mov qword [xDI + CPUMCTX.ecx], rcx
mov qword [xDI + CPUMCTX.edx], rdx
mov qword [xDI + CPUMCTX.esi], rsi
mov qword [xDI + CPUMCTX.ebp], rbp
mov qword [xDI + CPUMCTX.r8], r8
mov qword [xDI + CPUMCTX.r9], r9
mov qword [xDI + CPUMCTX.r10], r10
mov qword [xDI + CPUMCTX.r11], r11
mov qword [xDI + CPUMCTX.r12], r12
mov qword [xDI + CPUMCTX.r13], r13
mov qword [xDI + CPUMCTX.r14], r14
mov qword [xDI + CPUMCTX.r15], r15
%ifndef VBOX_WITH_OLD_VTX_CODE
mov rax, cr2
mov qword [xDI + CPUMCTX.cr2], rax
%endif
pop xAX ; The guest edi we pushed above
mov qword [xDI + CPUMCTX.edi], rax
; Restore TSS selector; must mark it as not busy before using ltr (!)
; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
; @todo get rid of sgdt
pop xBX ; Saved TR
sub xSP, xCB * 2
sgdt [xSP]
mov xAX, xBX
and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
ltr bx
add xSP, xCB * 2
pop xAX ; Saved LDTR
lldt ax
pop xSI ; pCtx (needed in rsi by the macros below)
%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
; Save the guest MSRs and load the host MSRs.
LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif
%endif
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
pop xDX ; Saved pCache
mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
cmp ecx, 0 ; Can't happen
je .no_cached_reads
jmp .cached_read
ALIGN(16)
.cached_read:
dec xCX
mov eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
cmp xCX, 0
jnz .cached_read
.no_cached_reads:
%ifdef VBOX_WITH_OLD_VTX_CODE
; Restore CR2 into VMCS-cache field (for EPT).
mov xAX, cr2
mov [xDX + VMCSCACHE.cr2], xAX
%endif
%endif
; Restore segment registers.
MYPOPSEGS xAX, ax
; Restore general purpose registers.
MYPOPAD
mov eax, VINF_SUCCESS
.vmstart64_end:
popf
pop xBP
ret
.vmxstart64_invalid_vmcs_ptr:
; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_GDTR_IDTR
lidt [xSP]
add xSP, xCB * 2
lgdt [xSP]
add xSP, xCB * 2
%endif
; Restore TSS selector; must mark it as not busy before using ltr (!)
; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
; @todo get rid of sgdt
pop xBX ; Saved TR
sub xSP, xCB * 2
sgdt [xSP]
mov xAX, xBX
and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
ltr bx
add xSP, xCB * 2
pop xAX ; Saved LDTR
lldt ax
pop xSI ; pCtx (needed in rsi by the macros below)
%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
LOADHOSTMSR MSR_K8_SF_MASK
LOADHOSTMSR MSR_K6_STAR
LOADHOSTMSR MSR_K8_LSTAR
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
%endif
%endif
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
add xSP, xCB ; pCache
%endif
; Restore segment registers.
MYPOPSEGS xAX, ax
; Restore all general purpose host registers.
MYPOPAD
mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
jmp .vmstart64_end
.vmxstart64_start_failed:
; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_GDTR_IDTR
lidt [xSP]
add xSP, xCB * 2
lgdt [xSP]
add xSP, xCB * 2
%endif
; Restore TSS selector; must mark it as not busy before using ltr (!)
; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
; @todo get rid of sgdt
pop xBX ; Saved TR
sub xSP, xCB * 2
sgdt [xSP]
mov xAX, xBX
and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
ltr bx
add xSP, xCB * 2
pop xAX ; Saved LDTR
lldt ax
pop xSI ; pCtx (needed in rsi by the macros below).
%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
LOADHOSTMSR MSR_K8_SF_MASK
LOADHOSTMSR MSR_K6_STAR
LOADHOSTMSR MSR_K8_LSTAR
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
%endif
%endif
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
add xSP, xCB ; pCache
%endif
; Restore segment registers.
MYPOPSEGS xAX, ax
; Restore all general purpose host registers.
MYPOPAD
mov eax, VERR_VMX_UNABLE_TO_START_VM
jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param pVMCBHostPhys Physical address of the host VMCB
; * @param pVMCBPhys Physical address of the guest VMCB
; * @param pCtx Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
%ifdef ASM_CALL64_GCC
push rdx
push rsi
push rdi
%else
push r8
push rdx
push rcx
%endif
push 0
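; The zero fakes the return-address slot, so that after the push xBP below
; [xBP + xCB * 2] addresses the first parameter exactly as in the 32-bit
; cdecl case.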
%endif
push xBP
mov xBP, xSP
pushf
; Manual save and restore:
; - General purpose registers except RIP, RSP, RAX
;
; Trashed:
; - CR2 (we don't care)
; - LDTR (reset to 0)
; - DRx (presumably not changed at all)
; - DR7 (reset to 0x400)
; Save all general purpose host registers.
MYPUSHAD
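; Parameter layout of the (real or faked) cdecl frame:
;   [xBP + xCB * 2]                     pVMCBHostPhys (RTHCPHYS)
;   [xBP + xCB * 2 + RTHCPHYS_CB]       pVMCBPhys     (RTHCPHYS)
;   [xBP + xCB * 2 + RTHCPHYS_CB * 2]   pCtx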
; Save the Guest CPU context pointer.
mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
push xSI ; push for saving the state at the end
; Save host fs, gs, sysenter msr etc.
mov xAX, [xBP + xCB * 2] ; pVMCBHostPhys (64-bit physical address; x86: take low dword only)
push xAX ; save for the vmload after vmrun
vmsave
; Set up xAX for VMLOAD and VMRUN (guest VMCB).
mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address; take low dword only)
; Restore Guest's general purpose registers.
; eax is loaded from the VMCB by VMRUN.
mov ebx, [xSI + CPUMCTX.ebx]
mov ecx, [xSI + CPUMCTX.ecx]
mov edx, [xSI + CPUMCTX.edx]
mov edi, [xSI + CPUMCTX.edi]
mov ebp, [xSI + CPUMCTX.ebp]
mov esi, [xSI + CPUMCTX.esi]
; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
clgi
sti
; Load guest fs, gs, sysenter msr etc.
vmload
; Run the VM.
vmrun
; eax is in the VMCB already; we can use it here.
; Save guest fs, gs, sysenter msr etc.
vmsave
; Load host fs, gs, sysenter msr etc.
pop xAX ; Pushed above
vmload
; Set the global interrupt flag again, but execute cli to make sure IF=0.
cli
stgi
pop xAX ; pCtx
mov [ss:xAX + CPUMCTX.ebx], ebx
mov [ss:xAX + CPUMCTX.ecx], ecx
mov [ss:xAX + CPUMCTX.edx], edx
mov [ss:xAX + CPUMCTX.esi], esi
mov [ss:xAX + CPUMCTX.edi], edi
mov [ss:xAX + CPUMCTX.ebp], ebp
; Restore general purpose registers.
MYPOPAD
mov eax, VINF_SUCCESS
popf
pop xBP
%ifdef RT_ARCH_AMD64
add xSP, 4*xCB
%endif
ret
ENDPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param pVMCBHostPhys Physical address of the host VMCB
; * @param pVMCBPhys Physical address of the guest VMCB
; * @param pCtx Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
; Fake a cdecl stack frame
%ifdef ASM_CALL64_GCC
push rdx
push rsi
push rdi
%else
push r8
push rdx
push rcx
%endif
push 0
push rbp
mov rbp, rsp
pushf
; Manual save and restore:
; - General purpose registers except RIP, RSP, RAX
;
; Trashed:
; - CR2 (we don't care)
; - LDTR (reset to 0)
; - DRx (presumably not changed at all)
; - DR7 (reset to 0x400)
;
; Save all general purpose host registers.
MYPUSHAD
; Save the Guest CPU context pointer.
mov rsi, [rbp + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
push rsi ; push for saving the state at the end
; Save host fs, gs, sysenter msr etc.
mov rax, [rbp + xCB * 2] ; pVMCBHostPhys (64-bit physical address)
push rax ; Save for the vmload after vmrun
vmsave
; Set up rax for VMLOAD and VMRUN (guest VMCB).
mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64-bit physical address)
; Restore Guest's general purpose registers.
; rax is loaded from the VMCB by VMRUN.
mov rbx, qword [xSI + CPUMCTX.ebx]
mov rcx, qword [xSI + CPUMCTX.ecx]
mov rdx, qword [xSI + CPUMCTX.edx]
mov rdi, qword [xSI + CPUMCTX.edi]
mov rbp, qword [xSI + CPUMCTX.ebp]
mov r8, qword [xSI + CPUMCTX.r8]
mov r9, qword [xSI + CPUMCTX.r9]
mov r10, qword [xSI + CPUMCTX.r10]
mov r11, qword [xSI + CPUMCTX.r11]
mov r12, qword [xSI + CPUMCTX.r12]
mov r13, qword [xSI + CPUMCTX.r13]
mov r14, qword [xSI + CPUMCTX.r14]
mov r15, qword [xSI + CPUMCTX.r15]
mov rsi, qword [xSI + CPUMCTX.esi]
; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
clgi
sti
; Load guest fs, gs, sysenter msr etc.
vmload
; Run the VM.
vmrun
; rax is in the VMCB already; we can use it here.
; Save guest fs, gs, sysenter msr etc.
vmsave
; Load host fs, gs, sysenter msr etc.
pop rax ; pushed above
vmload
; Set the global interrupt flag again, but execute cli to make sure IF=0.
cli
stgi
pop rax ; pCtx
mov qword [rax + CPUMCTX.ebx], rbx
mov qword [rax + CPUMCTX.ecx], rcx
mov qword [rax + CPUMCTX.edx], rdx
mov qword [rax + CPUMCTX.esi], rsi
mov qword [rax + CPUMCTX.edi], rdi
mov qword [rax + CPUMCTX.ebp], rbp
mov qword [rax + CPUMCTX.r8], r8
mov qword [rax + CPUMCTX.r9], r9
mov qword [rax + CPUMCTX.r10], r10
mov qword [rax + CPUMCTX.r11], r11
mov qword [rax + CPUMCTX.r12], r12
mov qword [rax + CPUMCTX.r13], r13
mov qword [rax + CPUMCTX.r14], r14
mov qword [rax + CPUMCTX.r15], r15
; Restore general purpose registers.
MYPOPAD
mov eax, VINF_SUCCESS
popf
pop rbp
add rsp, 4*xCB
ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64