HMR0Mixed.mac revision 0296fdabd63ae9bf3d7618040ed4f3ccb872d62a
; HMR0Mixed.mac - Stuff that darwin needs to build two versions of.
; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
af062818b47340eef15700d2f0211576ba3506eevboxsync%ifndef VBOX_WITH_OLD_VTX_CODE
b955672b950093ff7416d1269dd4d3b69983bd8fvboxsync %ifdef RT_ARCH_AMD64
4b9d6701570cb98fd36e209314239d104ec584d3vboxsync %define VMX_SKIP_GDTR_IDTR
4b9d6701570cb98fd36e209314239d104ec584d3vboxsync %define VMX_SKIP_LDTR_TR
; * Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
; * @returns VBox status code
; * @param fResume x86:[ebp+8], msc:rcx,gcc:rdi vmlaunch/vmresume
; * @param pCtx    x86:[ebp+c], msc:rdx,gcc:rsi Guest context
; * @param pCache  x86:[esp+10],msc:r8, gcc:rdx VMCS cache
af062818b47340eef15700d2f0211576ba3506eevboxsyncALIGNCODE(16)
af062818b47340eef15700d2f0211576ba3506eevboxsyncBEGINPROC MY_NAME(VMXR0StartVM32)
af062818b47340eef15700d2f0211576ba3506eevboxsync mov xBP, xSP
af062818b47340eef15700d2f0211576ba3506eevboxsync ; Save all general purpose host registers.
af062818b47340eef15700d2f0211576ba3506eevboxsync ; First we have to save some final CPU context registers.
af062818b47340eef15700d2f0211576ba3506eevboxsync mov eax, VMX_VMCS_HOST_RIP
af062818b47340eef15700d2f0211576ba3506eevboxsync%ifdef RT_ARCH_AMD64
af062818b47340eef15700d2f0211576ba3506eevboxsync lea r10, [.vmlaunch_done wrt rip]
af062818b47340eef15700d2f0211576ba3506eevboxsync vmwrite rax, r10
af062818b47340eef15700d2f0211576ba3506eevboxsync mov ecx, .vmlaunch_done
af062818b47340eef15700d2f0211576ba3506eevboxsync vmwrite eax, ecx
af062818b47340eef15700d2f0211576ba3506eevboxsync ; Note: assumes success!
af062818b47340eef15700d2f0211576ba3506eevboxsync ; Manual save and restore:
af062818b47340eef15700d2f0211576ba3506eevboxsync ; - General purpose registers except RIP, RSP.
af062818b47340eef15700d2f0211576ba3506eevboxsync ; - CR2 (we don't care).
af062818b47340eef15700d2f0211576ba3506eevboxsync ; - LDTR (reset to 0).
589fd26cedb2b4ebbed14f2964cad03cc8ebbca2vboxsync ; - DRx (presumably not changed at all).
af062818b47340eef15700d2f0211576ba3506eevboxsync ; - DR7 (reset to 0x400).
af062818b47340eef15700d2f0211576ba3506eevboxsync ; - EFLAGS (reset to RT_BIT(1); not relevant).
af062818b47340eef15700d2f0211576ba3506eevboxsync ; Save the Guest CPU context pointer.
af062818b47340eef15700d2f0211576ba3506eevboxsync%ifdef RT_ARCH_AMD64
af062818b47340eef15700d2f0211576ba3506eevboxsync %ifdef ASM_CALL64_GCC
af062818b47340eef15700d2f0211576ba3506eevboxsync ; fResume already in rdi
af062818b47340eef15700d2f0211576ba3506eevboxsync ; pCtx already in rsi
af062818b47340eef15700d2f0211576ba3506eevboxsync mov rbx, rdx ; pCache
af062818b47340eef15700d2f0211576ba3506eevboxsync mov rdi, rcx ; fResume
af062818b47340eef15700d2f0211576ba3506eevboxsync mov rsi, rdx ; pCtx
af062818b47340eef15700d2f0211576ba3506eevboxsync mov rbx, r8 ; pCache
af062818b47340eef15700d2f0211576ba3506eevboxsync mov edi, [ebp + 8] ; fResume
af062818b47340eef15700d2f0211576ba3506eevboxsync mov esi, [ebp + 12] ; pCtx
af062818b47340eef15700d2f0211576ba3506eevboxsync mov ebx, [ebp + 16] ; pCache
af062818b47340eef15700d2f0211576ba3506eevboxsync ; Save segment registers.
af062818b47340eef15700d2f0211576ba3506eevboxsync ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
af062818b47340eef15700d2f0211576ba3506eevboxsync MYPUSHSEGS xAX, ax
af062818b47340eef15700d2f0211576ba3506eevboxsync%ifdef VMX_USE_CACHED_VMCS_ACCESSES
af062818b47340eef15700d2f0211576ba3506eevboxsync je .no_cached_writes
af062818b47340eef15700d2f0211576ba3506eevboxsync mov edx, ecx
af062818b47340eef15700d2f0211576ba3506eevboxsync jmp .cached_write
af062818b47340eef15700d2f0211576ba3506eevboxsync.cached_write:
af062818b47340eef15700d2f0211576ba3506eevboxsync mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
af062818b47340eef15700d2f0211576ba3506eevboxsync vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
af062818b47340eef15700d2f0211576ba3506eevboxsync cmp xCX, xDX
mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
mov xBX, [xSI + CPUMCTX.cr2]
mov eax, [xSI + CPUMCTX.eax]
mov ebx, [xSI + CPUMCTX.ebx]
mov ecx, [xSI + CPUMCTX.ecx]
mov edx, [xSI + CPUMCTX.edx]
mov ebp, [xSI + CPUMCTX.ebp]
mov edi, [xSI + CPUMCTX.edi]
mov esi, [xSI + CPUMCTX.esi]
mov edi, [xSI + CPUMCTX.edi]
mov esi, [xSI + CPUMCTX.esi]
mov [ss:xDI + CPUMCTX.eax], eax
mov [ss:xDI + CPUMCTX.ebx], ebx
mov [ss:xDI + CPUMCTX.ecx], ecx
mov [ss:xDI + CPUMCTX.edx], edx
mov [ss:xDI + CPUMCTX.esi], esi
mov [ss:xDI + CPUMCTX.ebp], ebp
mov [ss:xDI + CPUMCTX.cr2], xAX
mov dword [ss:xDI + CPUMCTX.edi], eax
pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
mov [ss:xDX + VMCSCACHE.cr2], xAX
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
; * @param fResume msc:rcx, gcc:rdi vmlauch/vmresume
mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
mov rbx, qword [xSI + CPUMCTX.cr2]
mov rax, qword [xSI + CPUMCTX.eax]
mov rbx, qword [xSI + CPUMCTX.ebx]
mov rcx, qword [xSI + CPUMCTX.ecx]
mov rdx, qword [xSI + CPUMCTX.edx]
mov rbp, qword [xSI + CPUMCTX.ebp]
mov r8, qword [xSI + CPUMCTX.r8]
mov r9, qword [xSI + CPUMCTX.r9]
mov r10, qword [xSI + CPUMCTX.r10]
mov r11, qword [xSI + CPUMCTX.r11]
mov r12, qword [xSI + CPUMCTX.r12]
mov r13, qword [xSI + CPUMCTX.r13]
mov r14, qword [xSI + CPUMCTX.r14]
mov r15, qword [xSI + CPUMCTX.r15]
mov rdi, qword [xSI + CPUMCTX.edi]
mov rsi, qword [xSI + CPUMCTX.esi]
mov rdi, qword [xSI + CPUMCTX.edi]
mov rsi, qword [xSI + CPUMCTX.esi]
mov qword [xDI + CPUMCTX.eax], rax
mov qword [xDI + CPUMCTX.ebx], rbx
mov qword [xDI + CPUMCTX.ecx], rcx
mov qword [xDI + CPUMCTX.edx], rdx
mov qword [xDI + CPUMCTX.esi], rsi
mov qword [xDI + CPUMCTX.ebp], rbp
mov qword [xDI + CPUMCTX.r8], r8
mov qword [xDI + CPUMCTX.r9], r9
mov qword [xDI + CPUMCTX.r10], r10
mov qword [xDI + CPUMCTX.r11], r11
mov qword [xDI + CPUMCTX.r12], r12
mov qword [xDI + CPUMCTX.r13], r13
mov qword [xDI + CPUMCTX.r14], r14
mov qword [xDI + CPUMCTX.r15], r15
mov qword [xDI + CPUMCTX.cr2], rax
mov qword [xDI + CPUMCTX.edi], rax
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
mov eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
mov [xDX + VMCSCACHE.cr2], xAX
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
mov ebx, [xSI + CPUMCTX.ebx]
mov ecx, [xSI + CPUMCTX.ecx]
mov edx, [xSI + CPUMCTX.edx]
mov edi, [xSI + CPUMCTX.edi]
mov ebp, [xSI + CPUMCTX.ebp]
mov esi, [xSI + CPUMCTX.esi]
; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
mov [ss:xAX + CPUMCTX.ebx], ebx
mov [ss:xAX + CPUMCTX.ecx], ecx
mov [ss:xAX + CPUMCTX.edx], edx
mov [ss:xAX + CPUMCTX.esi], esi
mov [ss:xAX + CPUMCTX.edi], edi
mov [ss:xAX + CPUMCTX.ebp], ebp
mov rbx, qword [xSI + CPUMCTX.ebx]
mov rcx, qword [xSI + CPUMCTX.ecx]
mov rdx, qword [xSI + CPUMCTX.edx]
mov rdi, qword [xSI + CPUMCTX.edi]
mov rbp, qword [xSI + CPUMCTX.ebp]
mov r8, qword [xSI + CPUMCTX.r8]
mov r9, qword [xSI + CPUMCTX.r9]
mov r10, qword [xSI + CPUMCTX.r10]
mov r11, qword [xSI + CPUMCTX.r11]
mov r12, qword [xSI + CPUMCTX.r12]
mov r13, qword [xSI + CPUMCTX.r13]
mov r14, qword [xSI + CPUMCTX.r14]
mov r15, qword [xSI + CPUMCTX.r15]
mov rsi, qword [xSI + CPUMCTX.esi]
; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
mov qword [rax + CPUMCTX.ebx], rbx
mov qword [rax + CPUMCTX.ecx], rcx
mov qword [rax + CPUMCTX.edx], rdx
mov qword [rax + CPUMCTX.esi], rsi
mov qword [rax + CPUMCTX.edi], rdi
mov qword [rax + CPUMCTX.ebp], rbp
mov qword [rax + CPUMCTX.r8], r8
mov qword [rax + CPUMCTX.r9], r9
mov qword [rax + CPUMCTX.r10], r10
mov qword [rax + CPUMCTX.r11], r11
mov qword [rax + CPUMCTX.r12], r12
mov qword [rax + CPUMCTX.r13], r13
mov qword [rax + CPUMCTX.r14], r14
mov qword [rax + CPUMCTX.r15], r15