; $Id$
;; @file
; HM - Ring-0 Host 32/64, Guest 32/64 world-switch routines
;
; Darwin uses this to build two versions in the hybrid case.
; Included by HMR0A.asm with RT_ARCH_AMD64 defined or undefined.
;

;
; Copyright (C) 2006-2013 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

%ifndef VBOX_WITH_OLD_VTX_CODE
 %ifdef RT_ARCH_AMD64
  ;;
  ; Keep these macro definitions in this file as it gets included and compiled
  ; with RT_ARCH_AMD64 once and RT_ARCH_X86 once.
  %define VMX_SKIP_GDTR
  %ifndef RT_OS_DARWIN
   ; Darwin (Mavericks) uses IDTR limit to store the CPUID so we need to restore it always. See @bugref{6875}.
   %define VMX_SKIP_IDTR
  %endif
  %define VMX_SKIP_TR
 %endif
%endif
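
;
; When VMX_SKIP_GDTR / VMX_SKIP_IDTR / VMX_SKIP_TR are defined, the code below
; compiles out the corresponding save (sgdt/sidt/str) before VM entry and the
; restore (lgdt/lidt/ltr) after the VM exit.  VT-x keeps only the GDTR/IDTR base
; and resets their limits to 0xffff (and the TR limit to 0x67), so skipping the
; restore is only an optimization where those reset values are acceptable; the
; IDTR is never skipped on Darwin because Mavericks keeps the CPU ID in the
; IDTR limit (see @bugref{6875} above).
;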

;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
; *
; * @returns VBox status code
; * @param   fResume    x86:[ebp+8], msc:rcx, gcc:rdi   vmlaunch/vmresume
; * @param   pCtx       x86:[ebp+c], msc:rdx, gcc:rsi   Guest context
; * @param   pCache     x86:[ebp+10], msc:r8, gcc:rdx   VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM32)
    push xBP
    mov xBP, xSP

    pushf
    cli

    ; Save all general purpose host registers.
    MYPUSHAD

    ; First we have to save some final CPU context registers.
    mov eax, VMX_VMCS_HOST_RIP
%ifdef RT_ARCH_AMD64
    lea r10, [.vmlaunch_done wrt rip]
    vmwrite rax, r10
%else
    mov ecx, .vmlaunch_done
    vmwrite eax, ecx
%endif
    ; Note: assumes success!
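    ; VMX_VMCS_HOST_RIP is the instruction pointer the CPU loads on every VM exit,
    ; so pointing it at .vmlaunch_done makes that label the common landing site for
    ; both the VMLAUNCH and VMRESUME paths below.  "Assumes success" means the
    ; VMfail result of the vmwrite itself is not checked.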

    ; Save the Guest CPU context pointer.
%ifdef RT_ARCH_AMD64
 %ifdef ASM_CALL64_GCC
    ; fResume already in rdi
    ; pCtx already in rsi
    mov rbx, rdx ; pCache
 %else
    mov rdi, rcx ; fResume
    mov rsi, rdx ; pCtx
    mov rbx, r8 ; pCache
 %endif
%else
    mov edi, [ebp + 8] ; fResume
    mov esi, [ebp + 12] ; pCtx
    mov ebx, [ebp + 16] ; pCache
%endif

    ; Save segment registers.
    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
    MYPUSHSEGS xAX, ax

%ifdef VMX_USE_CACHED_VMCS_ACCESSES
    mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    cmp ecx, 0
    je .no_cached_writes
    mov edx, ecx
    mov ecx, 0
    jmp .cached_write

ALIGN(16)
.cached_write:
    mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
    inc xCX
    cmp xCX, xDX
    jl .cached_write

    mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:

    ; Save the pCache pointer.
    push xBX
%endif
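    ; The loop above flushes the VMCS write cache: for each of the cValidEntries
    ; pending entries it vmwrites the field ID from Write.aField[i] with the value
    ; from Write.aFieldVal[i], then marks the cache empty again.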

    ; Save the pCtx pointer.
    push xSI

    ; Save LDTR.
    xor eax, eax
    sldt ax
    push xAX

%ifndef VMX_SKIP_TR
    ; The TR limit is reset to 0x67; restore it manually.
    str eax
    push xAX
%endif

%ifndef VMX_SKIP_GDTR
    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    sub xSP, xCB * 2
    sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
    sub xSP, xCB * 2
    sidt [xSP]
%endif
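    ; sgdt/sidt store a pseudo-descriptor (16-bit limit followed by the base), so
    ; two stack slots (xCB * 2 bytes) are reserved for each.  The matching
    ; lgdt/lidt after the VM exit reload from these same slots to put the limits
    ; back, since VT-x only preserves the bases.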

    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    mov xBX, [xSI + CPUMCTX.cr2]
    mov xDX, cr2
    cmp xBX, xDX
    je .skipcr2write32
    mov cr2, xBX

.skipcr2write32:
    mov eax, VMX_VMCS_HOST_RSP
    vmwrite xAX, xSP
    ; Note: assumes success!
    ; Don't mess with ESP anymore!!!
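    ; VMX_VMCS_HOST_RSP was just loaded with the current stack pointer; this is the
    ; value RSP gets on VM exit, so any further push/pop here would make the saved
    ; pCtx/LDTR/TR/GDTR/IDTR slots sit at the wrong offsets after the exit.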

    ; Load Guest's general purpose registers.
    mov eax, [xSI + CPUMCTX.eax]
    mov ebx, [xSI + CPUMCTX.ebx]
    mov ecx, [xSI + CPUMCTX.ecx]
    mov edx, [xSI + CPUMCTX.edx]
    mov ebp, [xSI + CPUMCTX.ebp]

    ; Resume or start?
    cmp xDI, 0 ; fResume
    je .vmlaunch_launch

    ; Restore edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmresume
    jmp .vmlaunch_done ; Here if vmresume detected a failure.

.vmlaunch_launch:
    ; Restore edi & esi.
    mov edi, [xSI + CPUMCTX.edi]
    mov esi, [xSI + CPUMCTX.esi]

    vmlaunch
    jmp .vmlaunch_done ; Here if vmlaunch detected a failure.

ALIGNCODE(16) ;; @todo YASM BUG - this alignment is wrong on darwin, it's 1 byte off.
.vmlaunch_done:
    jc near .vmxstart_invalid_vmcs_ptr
    jz near .vmxstart_start_failed

    ; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_IDTR
    lidt [xSP]
    add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_GDTR
    lgdt [xSP]
    add xSP, xCB * 2
%endif

    push xDI
%ifndef VMX_SKIP_TR
    mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR).
%else
    mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR).
%endif

    mov [ss:xDI + CPUMCTX.eax], eax
    mov [ss:xDI + CPUMCTX.ebx], ebx
    mov [ss:xDI + CPUMCTX.ecx], ecx
    mov [ss:xDI + CPUMCTX.edx], edx
    mov [ss:xDI + CPUMCTX.esi], esi
    mov [ss:xDI + CPUMCTX.ebp], ebp
%ifndef VBOX_WITH_OLD_VTX_CODE
    mov xAX, cr2
    mov [ss:xDI + CPUMCTX.cr2], xAX
%endif

%ifdef RT_ARCH_AMD64
    pop xAX ; The guest edi we pushed above.
    mov dword [ss:xDI + CPUMCTX.edi], eax
%else
    pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above.
%endif

%ifndef VMX_SKIP_TR
    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    ; @todo get rid of sgdt
    pop xBX ; Saved TR
    sub xSP, xCB * 2
    sgdt [xSP]
    mov xAX, xBX
    and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
    add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
    and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
    ltr bx
    add xSP, xCB * 2
%endif
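    ; ltr faults with #GP if the referenced TSS descriptor is already marked busy,
    ; and it is still marked busy from before the VM entry; hence the busy bit
    ; (the 0200h mask in the descriptor's second dword) is cleared in the GDT
    ; before TR is reloaded.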

    pop xAX ; Saved LDTR
%ifdef RT_ARCH_AMD64
    cmp xAX, 0
je .skipldtwrite32
%endif
lldt ax
.skipldtwrite32:
add xSP, xCB ; pCtx
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
pop xDX ; Saved pCache
mov ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
cmp ecx, 0 ; Can't happen
je .no_cached_reads
jmp .cached_read
ALIGN(16)
.cached_read:
dec xCX
mov eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
vmread [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
cmp xCX, 0
jnz .cached_read
.no_cached_reads:
%ifdef VBOX_WITH_OLD_VTX_CODE
; Restore CR2 into VMCS-cache field (for EPT).
mov xAX, cr2
mov [ss:xDX + VMCSCACHE.cr2], xAX
%endif
%endif
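; The read cache works in the opposite direction of the write cache: after the
; VM exit, each field ID queued in Read.aField[] is vmread into the matching
; Read.aFieldVal[] slot, walking the entries from cValidEntries - 1 down to 0.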
; Restore segment registers.
MYPOPSEGS xAX, ax
; Restore general purpose registers.
MYPOPAD
mov eax, VINF_SUCCESS
.vmstart_end:
popf
pop xBP
ret
.vmxstart_invalid_vmcs_ptr:
; Restore base and limit of the IDTR & GDTR
%ifndef VMX_SKIP_IDTR
lidt [xSP]
add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_GDTR
lgdt [xSP]
add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_TR
; Restore TSS selector; must mark it as not busy before using ltr (!)
; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
; @todo get rid of sgdt
pop xBX ; Saved TR
sub xSP, xCB * 2
sgdt [xSP]
mov xAX, xBX
and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
ltr bx
add xSP, xCB * 2
%endif
pop xAX ; Saved LDTR
lldt ax ; Don't bother with conditional restoration in the error case.
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
add xSP, xCB * 2 ; pCtx + pCache
%else
add xSP, xCB ; pCtx
%endif
; Restore segment registers.
MYPOPSEGS xAX, ax
; Restore all general purpose host registers.
MYPOPAD
mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
jmp .vmstart_end
.vmxstart_start_failed:
; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_IDTR
lidt [xSP]
add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_GDTR
lgdt [xSP]
add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_TR
; Restore TSS selector; must mark it as not busy before using ltr (!)
; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
; @todo get rid of sgdt
pop xBX ; Saved TR
sub xSP, xCB * 2
sgdt [xSP]
mov xAX, xBX
and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
and dword [ss:xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
ltr bx
add xSP, xCB * 2
%endif
pop xAX ; Saved LDTR
lldt ax ; Don't bother with conditional restoration in the error case
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
add xSP, xCB * 2 ; pCtx + pCache
%else
add xSP, xCB ; pCtx
%endif
; Restore segment registers.
MYPOPSEGS xAX, ax
; Restore all general purpose host registers.
MYPOPAD
mov eax, VERR_VMX_UNABLE_TO_START_VM
jmp .vmstart_end
ENDPROC MY_NAME(VMXR0StartVM32)
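; Illustrative only: judging from the parameter list above, the C side calls this
; along the lines of the hypothetical prototype below (not the exact VirtualBox
; declaration):
;     int VMXR0StartVM32(uint32_t fResume, PCPUMCTX pCtx, PVMCSCACHE pCache);
; It returns VINF_SUCCESS after a normal VM exit, or
; VERR_VMX_INVALID_VMCS_PTR_TO_START_VM / VERR_VMX_UNABLE_TO_START_VM when
; VMLAUNCH/VMRESUME itself fails.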
%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
; *
; * @returns VBox status code
; * @param   fResume    msc:rcx, gcc:rdi   vmlaunch/vmresume
; * @param   pCtx       msc:rdx, gcc:rsi   Guest context
; * @param   pCache     msc:r8,  gcc:rdx   VMCS cache
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(VMXR0StartVM64)
push xBP
mov xBP, xSP
pushf
cli
; Save all general purpose host registers.
MYPUSHAD
; First we have to save some final CPU context registers.
lea r10, [.vmlaunch64_done wrt rip]
mov rax, VMX_VMCS_HOST_RIP ; Return address (too difficult to continue after VMLAUNCH?).
vmwrite rax, r10
; Note: assumes success!
; Save the Guest CPU context pointer.
%ifdef ASM_CALL64_GCC
; fResume already in rdi
; pCtx already in rsi
mov rbx, rdx ; pCache
%else
mov rdi, rcx ; fResume
mov rsi, rdx ; pCtx
mov rbx, r8 ; pCache
%endif
; Save segment registers.
; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
MYPUSHSEGS xAX, ax
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
mov ecx, [xBX + VMCSCACHE.Write.cValidEntries]
cmp ecx, 0
je .no_cached_writes
mov edx, ecx
mov ecx, 0
jmp .cached_write
ALIGN(16)
.cached_write:
mov eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
inc xCX
cmp xCX, xDX
jl .cached_write
mov dword [xBX + VMCSCACHE.Write.cValidEntries], 0
.no_cached_writes:
; Save the pCache pointer.
push xBX
%endif
%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
; Save the host MSRs and load the guest MSRs.
LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif
%endif
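; Without VBOX_WITH_AUTO_MSR_LOAD_RESTORE the syscall-related MSRs (LSTAR, STAR,
; SF_MASK, KERNEL_GS_BASE) are swapped by hand: LOADGUESTMSR above saves the host
; value and loads the guest value from the named CPUMCTX field, and the
; LOADHOSTMSREX/LOADHOSTMSR invocations after the VM exit reverse the swap.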
; Save the pCtx pointer.
push xSI
; Save LDTR.
xor eax, eax
sldt ax
push xAX
%ifndef VMX_SKIP_TR
; The TR limit is reset to 0x67; restore it manually.
str eax
push xAX
%endif
; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
%ifndef VMX_SKIP_GDTR
sub xSP, xCB * 2
sgdt [xSP]
%endif
%ifndef VMX_SKIP_IDTR
sub xSP, xCB * 2
sidt [xSP]
%endif
; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
mov rbx, qword [xSI + CPUMCTX.cr2]
mov rdx, cr2
cmp rbx, rdx
je .skipcr2write
mov cr2, rbx
.skipcr2write:
mov eax, VMX_VMCS_HOST_RSP
vmwrite xAX, xSP
; Note: assumes success!
; Don't mess with ESP anymore!!!
; Restore Guest's general purpose registers.
mov rax, qword [xSI + CPUMCTX.eax]
mov rbx, qword [xSI + CPUMCTX.ebx]
mov rcx, qword [xSI + CPUMCTX.ecx]
mov rdx, qword [xSI + CPUMCTX.edx]
mov rbp, qword [xSI + CPUMCTX.ebp]
mov r8, qword [xSI + CPUMCTX.r8]
mov r9, qword [xSI + CPUMCTX.r9]
mov r10, qword [xSI + CPUMCTX.r10]
mov r11, qword [xSI + CPUMCTX.r11]
mov r12, qword [xSI + CPUMCTX.r12]
mov r13, qword [xSI + CPUMCTX.r13]
mov r14, qword [xSI + CPUMCTX.r14]
mov r15, qword [xSI + CPUMCTX.r15]
; Resume or start?
cmp xDI, 0 ; fResume
je .vmlaunch64_launch
; Restore rdi & rsi.
mov rdi, qword [xSI + CPUMCTX.edi]
mov rsi, qword [xSI + CPUMCTX.esi]
vmresume
jmp .vmlaunch64_done; ; Here if vmresume detected a failure.
.vmlaunch64_launch:
; Restore rdi & rsi.
mov rdi, qword [xSI + CPUMCTX.edi]
mov rsi, qword [xSI + CPUMCTX.esi]
vmlaunch
jmp .vmlaunch64_done; ; Here if vmlaunch detected a failure.
ALIGNCODE(16)
.vmlaunch64_done:
jc near .vmxstart64_invalid_vmcs_ptr
jz near .vmxstart64_start_failed
; Restore base and limit of the IDTR & GDTR
%ifndef VMX_SKIP_IDTR
lidt [xSP]
add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_GDTR
lgdt [xSP]
add xSP, xCB * 2
%endif
push xDI
%ifndef VMX_SKIP_TR
mov xDI, [xSP + xCB * 3] ; pCtx (*3 to skip the saved xDI, TR, LDTR)
%else
mov xDI, [xSP + xCB * 2] ; pCtx (*2 to skip the saved xDI, LDTR)
%endif
mov qword [xDI + CPUMCTX.eax], rax
mov qword [xDI + CPUMCTX.ebx], rbx
mov qword [xDI + CPUMCTX.ecx], rcx
mov qword [xDI + CPUMCTX.edx], rdx
mov qword [xDI + CPUMCTX.esi], rsi
mov qword [xDI + CPUMCTX.ebp], rbp
mov qword [xDI + CPUMCTX.r8], r8
mov qword [xDI + CPUMCTX.r9], r9
mov qword [xDI + CPUMCTX.r10], r10
mov qword [xDI + CPUMCTX.r11], r11
mov qword [xDI + CPUMCTX.r12], r12
mov qword [xDI + CPUMCTX.r13], r13
mov qword [xDI + CPUMCTX.r14], r14
mov qword [xDI + CPUMCTX.r15], r15
%ifndef VBOX_WITH_OLD_VTX_CODE
mov rax, cr2
mov qword [xDI + CPUMCTX.cr2], rax
%endif
pop xAX ; The guest edi we pushed above
mov qword [xDI + CPUMCTX.edi], rax
%ifndef VMX_SKIP_TR
; Restore TSS selector; must mark it as not busy before using ltr (!)
; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
; @todo get rid of sgdt
pop xBX ; Saved TR
sub xSP, xCB * 2
sgdt [xSP]
mov xAX, xBX
and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
ltr bx
add xSP, xCB * 2
%endif
pop xAX ; Saved LDTR
cmp xAX, 0
je .skipldtwrite64
lldt ax
.skipldtwrite64:
pop xSI ; pCtx (needed in rsi by the macros below)
%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
; Save the guest MSRs and load the host MSRs.
LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
%endif
%endif
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
pop xDX ; Saved pCache
mov ecx, [xDX + VMCSCACHE.Read.cValidEntries]
cmp ecx, 0 ; Can't happen
je .no_cached_reads
jmp .cached_read
ALIGN(16)
.cached_read:
dec xCX
mov eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
vmread [xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
cmp xCX, 0
jnz .cached_read
.no_cached_reads:
%ifdef VBOX_WITH_OLD_VTX_CODE
; Restore CR2 into VMCS-cache field (for EPT).
mov xAX, cr2
mov [xDX + VMCSCACHE.cr2], xAX
%endif
%endif
; Restore segment registers.
MYPOPSEGS xAX, ax
; Restore general purpose registers.
MYPOPAD
mov eax, VINF_SUCCESS
.vmstart64_end:
popf
pop xBP
ret
.vmxstart64_invalid_vmcs_ptr:
; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_IDTR
lidt [xSP]
add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_GDTR
lgdt [xSP]
add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_TR
; Restore TSS selector; must mark it as not busy before using ltr (!)
; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
; @todo get rid of sgdt
pop xBX ; Saved TR
sub xSP, xCB * 2
sgdt [xSP]
mov xAX, xBX
and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
ltr bx
add xSP, xCB * 2
%endif
pop xAX ; Saved LDTR
lldt ax ; Don't bother with conditional restoration in the error case.
pop xSI ; pCtx (needed in rsi by the macros below)
%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
LOADHOSTMSR MSR_K8_SF_MASK
LOADHOSTMSR MSR_K6_STAR
LOADHOSTMSR MSR_K8_LSTAR
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
%endif
%endif
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
add xSP, xCB ; pCache
%endif
; Restore segment registers.
MYPOPSEGS xAX, ax
; Restore all general purpose host registers.
MYPOPAD
mov eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
jmp .vmstart64_end
.vmxstart64_start_failed:
; Restore base and limit of the IDTR & GDTR.
%ifndef VMX_SKIP_IDTR
lidt [xSP]
add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_GDTR
lgdt [xSP]
add xSP, xCB * 2
%endif
%ifndef VMX_SKIP_TR
; Restore TSS selector; must mark it as not busy before using ltr (!)
; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
; @todo get rid of sgdt
pop xBX ; Saved TR
sub xSP, xCB * 2
sgdt [xSP]
mov xAX, xBX
and al, 0F8h ; Mask away TI and RPL bits, get descriptor offset.
add xAX, [xSP + 2] ; eax <- GDTR.address + descriptor offset.
and dword [xAX + 4], ~0200h ; Clear busy flag (2nd type2 bit).
ltr bx
add xSP, xCB * 2
%endif
pop xAX ; Saved LDTR
lldt ax ; Don't bother with conditional restoration in the error case.
pop xSI ; pCtx (needed in rsi by the macros below).
%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
LOADHOSTMSR MSR_K8_SF_MASK
LOADHOSTMSR MSR_K6_STAR
LOADHOSTMSR MSR_K8_LSTAR
%else
%ifdef VBOX_WITH_OLD_VTX_CODE
; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
%endif
%endif
%ifdef VMX_USE_CACHED_VMCS_ACCESSES
add xSP, xCB ; pCache
%endif
; Restore segment registers.
MYPOPSEGS xAX, ax
; Restore all general purpose host registers.
MYPOPAD
mov eax, VERR_VMX_UNABLE_TO_START_VM
jmp .vmstart64_end
ENDPROC MY_NAME(VMXR0StartVM64)
%endif ; RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (32-bit guests)
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of host VMCB
; * @param   pVMCBPhys       Physical address of guest VMCB
; * @param   pCtx            Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun)
%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
%ifdef ASM_CALL64_GCC
push rdx
push rsi
push rdi
%else
push r8
push rdx
push rcx
%endif
push 0
%endif
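; On AMD64 the three register arguments plus a dummy return-address slot (the
; "push 0") are pushed so that the [xBP + xCB * 2 + ...] offsets used below line
; up the same way as they do for the real cdecl stack frame on 32-bit hosts.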
push xBP
mov xBP, xSP
pushf
; Save all general purpose host registers.
MYPUSHAD
; Save the Guest CPU context pointer.
mov xSI, [xBP + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
push xSI ; push for saving the state at the end
; Save host fs, gs, sysenter msr etc.
mov xAX, [xBP + xCB * 2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
push xAX ; save for the vmload after vmrun
vmsave
; Setup eax for VMLOAD.
mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)
; Restore Guest's general purpose registers.
; eax is loaded from the VMCB by VMRUN.
mov ebx, [xSI + CPUMCTX.ebx]
mov ecx, [xSI + CPUMCTX.ecx]
mov edx, [xSI + CPUMCTX.edx]
mov edi, [xSI + CPUMCTX.edi]
mov ebp, [xSI + CPUMCTX.ebp]
mov esi, [xSI + CPUMCTX.esi]
; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
clgi
sti
; Load guest fs, gs, sysenter msr etc.
vmload
; Run the VM.
vmrun
; eax is in the VMCB already; we can use it here.
; Save guest fs, gs, sysenter msr etc.
vmsave
; Load host fs, gs, sysenter msr etc.
pop xAX ; Pushed above
vmload
; Set the global interrupt flag again, but execute cli to make sure IF=0.
cli
stgi
pop xAX ; pCtx
mov [ss:xAX + CPUMCTX.ebx], ebx
mov [ss:xAX + CPUMCTX.ecx], ecx
mov [ss:xAX + CPUMCTX.edx], edx
mov [ss:xAX + CPUMCTX.esi], esi
mov [ss:xAX + CPUMCTX.edi], edi
mov [ss:xAX + CPUMCTX.ebp], ebp
; Restore general purpose registers.
MYPOPAD
mov eax, VINF_SUCCESS
popf
pop xBP
%ifdef RT_ARCH_AMD64
add xSP, 4*xCB
%endif
ret
ENDPROC MY_NAME(SVMR0VMRun)
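; Sequence used above: VMSAVE on the host VMCB (host FS/GS/sysenter MSR state),
; then CLGI + STI, VMLOAD on the guest VMCB, VMRUN, VMSAVE on the guest VMCB,
; VMLOAD on the host VMCB, and finally CLI + STGI; the GIF/IF handling ensures
; external interrupts cause a world switch as noted in the comments above.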
%ifdef RT_ARCH_AMD64
;/**
; * Prepares for and executes VMRUN (64-bit guests)
; *
; * @returns VBox status code
; * @param   pVMCBHostPhys   Physical address of host VMCB
; * @param   pVMCBPhys       Physical address of guest VMCB
; * @param   pCtx            Guest context
; */
ALIGNCODE(16)
BEGINPROC MY_NAME(SVMR0VMRun64)
; Fake a cdecl stack frame
%ifdef ASM_CALL64_GCC
push rdx
push rsi
push rdi
%else
push r8
push rdx
push rcx
%endif
push 0
push rbp
mov rbp, rsp
pushf
; Manual save and restore:
; - General purpose registers except RIP, RSP, RAX
;
; Trashed:
; - CR2 (we don't care)
; - LDTR (reset to 0)
; - DRx (presumably not changed at all)
; - DR7 (reset to 0x400)
;
; Save all general purpose host registers.
MYPUSHAD
; Save the Guest CPU context pointer.
mov rsi, [rbp + xCB * 2 + RTHCPHYS_CB * 2] ; pCtx
push rsi ; push for saving the state at the end
; Save host fs, gs, sysenter msr etc.
mov rax, [rbp + xCB * 2] ; pVMCBHostPhys (64 bits physical address; x86: take low dword only)
push rax ; Save for the vmload after vmrun
vmsave
; Setup eax for VMLOAD.
mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; pVMCBPhys (64 bits physical address; take low dword only)
; Restore Guest's general purpose registers.
; rax is loaded from the VMCB by VMRUN.
mov rbx, qword [xSI + CPUMCTX.ebx]
mov rcx, qword [xSI + CPUMCTX.ecx]
mov rdx, qword [xSI + CPUMCTX.edx]
mov rdi, qword [xSI + CPUMCTX.edi]
mov rbp, qword [xSI + CPUMCTX.ebp]
mov r8, qword [xSI + CPUMCTX.r8]
mov r9, qword [xSI + CPUMCTX.r9]
mov r10, qword [xSI + CPUMCTX.r10]
mov r11, qword [xSI + CPUMCTX.r11]
mov r12, qword [xSI + CPUMCTX.r12]
mov r13, qword [xSI + CPUMCTX.r13]
mov r14, qword [xSI + CPUMCTX.r14]
mov r15, qword [xSI + CPUMCTX.r15]
mov rsi, qword [xSI + CPUMCTX.esi]
; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch.
clgi
sti
; Load guest fs, gs, sysenter msr etc.
vmload
; Run the VM.
vmrun
; rax is in the VMCB already; we can use it here.
; Save guest fs, gs, sysenter msr etc.
vmsave
; Load host fs, gs, sysenter msr etc.
pop rax ; pushed above
vmload
; Set the global interrupt flag again, but execute cli to make sure IF=0.
cli
stgi
pop rax ; pCtx
mov qword [rax + CPUMCTX.ebx], rbx
mov qword [rax + CPUMCTX.ecx], rcx
mov qword [rax + CPUMCTX.edx], rdx
mov qword [rax + CPUMCTX.esi], rsi
mov qword [rax + CPUMCTX.edi], rdi
mov qword [rax + CPUMCTX.ebp], rbp
mov qword [rax + CPUMCTX.r8], r8
mov qword [rax + CPUMCTX.r9], r9
mov qword [rax + CPUMCTX.r10], r10
mov qword [rax + CPUMCTX.r11], r11
mov qword [rax + CPUMCTX.r12], r12
mov qword [rax + CPUMCTX.r13], r13
mov qword [rax + CPUMCTX.r14], r14
mov qword [rax + CPUMCTX.r15], r15
; Restore general purpose registers.
MYPOPAD
mov eax, VINF_SUCCESS
popf
pop rbp
add rsp, 4 * xCB
ret
ENDPROC MY_NAME(SVMR0VMRun64)
%endif ; RT_ARCH_AMD64