; LegacyandAMD64.mac, revision a0b1cef8d9b4f05e3ae266775a3b71d7b9147284
;
; VMM - World Switchers, 32Bit to AMD64.
;

;
; Copyright (C) 2006-2007 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

;%define DEBUG_STUFF 1
;%define STRICT_IF 1

;*******************************************************************************
;*  Defined Constants And Macros                                               *
;*******************************************************************************


;*******************************************************************************
;*  Header Files                                                               *
;*******************************************************************************
%include "VBox/asmdefs.mac"
%include "VBox/x86.mac"
%include "VBox/cpum.mac"
%include "VBox/stam.mac"
%include "VBox/vm.mac"
%include "CPUMInternal.mac"
%include "VMMSwitcher/VMMSwitcher.mac"


;
; Start the fixup records
;    We collect the fixups in the .data section as we go along
;    It is therefore VITAL that no-one is using the .data section
;    for anything else between 'Start' and 'End'.
;
BEGINDATA
GLOBALNAME Fixups
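
; Each FIXUP use below has two halves: a record collected here in .data and
; a placeholder constant left in the instruction stream (typically
; 0ffffffffh) which the relocator patches at load time. Illustrative
; pattern, as used throughout this file:
;       FIXUP FIX_HC_CPUM_OFF, 1, 0
;       mov     edx, 0ffffffffh     ; patched with the real address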


BEGINCODE
GLOBALNAME Start

BITS 32

;;
; The C interface.
;
BEGINPROC vmmR0HostToGuest
%ifdef DEBUG_STUFF
    COM32_S_NEWLINE
    COM32_S_CHAR '^'
%endif

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_START edx
%endif

    ; turn off interrupts
    pushf
    cli

    ;
    ; Call worker.
    ;
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    push    cs                          ; allow for far return and restore cs correctly.
    call    NAME(vmmR0HostToGuestAsm)

    ; restore original flags
    popf

%ifdef VBOX_WITH_STATISTICS
    ;
    ; Switcher stats.
    ;
    FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
    mov     edx, 0ffffffffh
    STAM_PROFILE_ADV_STOP edx
%endif

    ret

ENDPROC vmmR0HostToGuest
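
; Callers reach the entry point above through the
; VMMSWITCHERDEF.offR0HostToGuest offset recorded at the bottom of this
; file; the value it eventually returns in eax is whatever the round trip
; through vmmR0HostToGuestAsm left in CPUMCPU.u32RetCode.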

; *****************************************************************************
; vmmR0HostToGuestAsm
;
; Phase one of the switch from host to guest context (host MMU context)
;
; INPUT:
;       - edx       virtual address of CPUM structure (valid in host context)
;
; USES/DESTROYS:
;       - eax, ecx, edx, esi
;
; ASSUMPTION:
;       - current CS and DS selectors are wide open
;
; *****************************************************************************
ALIGNCODE(16)
BEGINPROC vmmR0HostToGuestAsm
    ;;
    ;; Save CPU host context
    ;;      Skip eax, edx and ecx as these are not preserved over calls.
    ;;
    CPUMCPU_FROM_CPUM(edx)
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    ; phys address of scratch page
    mov     eax, dword [edx + CPUMCPU.Guest.dr + 4*8]
    mov     cr2, eax

    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 1
%endif

    ; general registers.
    mov     [edx + CPUMCPU.Host.ebx], ebx
    mov     [edx + CPUMCPU.Host.edi], edi
    mov     [edx + CPUMCPU.Host.esi], esi
    mov     [edx + CPUMCPU.Host.esp], esp
    mov     [edx + CPUMCPU.Host.ebp], ebp
    ; selectors.
    mov     [edx + CPUMCPU.Host.ds], ds
    mov     [edx + CPUMCPU.Host.es], es
    mov     [edx + CPUMCPU.Host.fs], fs
    mov     [edx + CPUMCPU.Host.gs], gs
    mov     [edx + CPUMCPU.Host.ss], ss
    ; special registers.
    sldt    [edx + CPUMCPU.Host.ldtr]
    sidt    [edx + CPUMCPU.Host.idtr]
    sgdt    [edx + CPUMCPU.Host.gdtr]
    str     [edx + CPUMCPU.Host.tr]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 2
%endif

    ; control registers.
    mov     eax, cr0
    mov     [edx + CPUMCPU.Host.cr0], eax
    ; Skip cr2; we assume the host OS doesn't stuff things in cr2. (safe)
    mov     eax, cr3
    mov     [edx + CPUMCPU.Host.cr3], eax
    mov     eax, cr4
    mov     [edx + CPUMCPU.Host.cr4], eax

    ; save the host EFER msr
    mov     ebx, edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    mov     [ebx + CPUMCPU.Host.efer], eax
    mov     [ebx + CPUMCPU.Host.efer + 4], edx
    mov     edx, ebx
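
    ; (rdmsr returns the MSR value in edx:eax, which is why the CPUMCPU
    ; pointer is parked in ebx across the read above; the same shuffle
    ; reappears around wrmsr on the way back in ICExitTarget.)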

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 3
%endif

    ; Load the new gdt so we can do a far jump after switching to 64-bit mode.
    lgdt    [edx + CPUMCPU.Hyper.gdtr]

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [edx + CPUMCPU.Guest.dr + 4*8], 4
%endif

    ;;
    ;; Load Intermediate memory context.
    ;;
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov     eax, 0ffffffffh
    mov     cr3, eax
    DEBUG_CHAR('?')

    ;;
    ;; Jump to identity mapped location
    ;;
    FIXUP FIX_HC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
    jmp near NAME(IDEnterTarget)


    ; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDEnterTarget
    DEBUG_CHAR('2')

    ; 1. Disable paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('2')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     eax, cr2
    mov     dword [eax], 3
%endif

    ; 2. Enable PAE.
    mov     ecx, cr4
    or      ecx, X86_CR4_PAE
    mov     cr4, ecx

    ; 3. Load long mode intermediate CR3.
    FIXUP FIX_INTER_AMD64_CR3, 1
    mov     ecx, 0ffffffffh
    mov     cr3, ecx
    DEBUG_CHAR('3')

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     eax, cr2
    mov     dword [eax], 4
%endif

    ; 4. Enable long mode.
    mov     ebp, edx
    mov     ecx, MSR_K6_EFER
    rdmsr
    or      eax, MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE
;    FIXUP FIX_EFER_OR_MASK
;    or      eax, 0
    or      eax, 0
    and     eax, ~(MSR_K6_EFER_FFXSR)   ; turn off fast fxsave/fxrstor (skipping xmm regs)
    wrmsr
    mov     edx, ebp
    DEBUG_CHAR('4')
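
    ; Note that setting EFER.LME above, while paging is off, only arms long
    ; mode; the CPU does not activate it (EFER.LMA) until CR0.PG is set
    ; again in step 5 below. Until then we remain in 32-bit protected mode.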

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     eax, cr2
    mov     dword [eax], 5
%endif

    ; 5. Enable paging.
    or      ebx, X86_CR0_PG
    ; Disable ring 0 write protection too
    and     ebx, ~X86_CR0_WRITE_PROTECT
    mov     cr0, ebx
    DEBUG_CHAR('5')

    ; Jump from compatibility mode to 64-bit mode.
    FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
    jmp     0ffffh:0fffffffeh
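
    ; The 0ffffh:0fffffffeh operand above is only a placeholder; the
    ; FIX_ID_FAR32_TO_64BIT_MODE record presumably has it patched at load
    ; time with a 64-bit code selector and the offset of IDEnter64Mode, so
    ; this one far jump both reloads CS and completes the switch into long
    ; mode.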

    ;
    ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
    ;
BITS 64
ALIGNCODE(16)
NAME(IDEnter64Mode):
    DEBUG_CHAR('6')
    jmp     [NAME(pICEnterTarget) wrt rip]

; 64-bit jump target
NAME(pICEnterTarget):
    FIXUP FIX_HC_64BIT_NOCHECK, 0, NAME(ICEnterTarget) - NAME(Start)
    dq 0ffffffffffffffffh

; 64-bit pCpum address.
NAME(pCpumIC):
    FIXUP FIX_GC_64_BIT_CPUM_OFF, 0, 0
    dq 0ffffffffffffffffh
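
; Note the pattern above: 64-bit absolute addresses live as dq placeholders
; in the instruction stream, get patched by the relocator, and are then read
; with RIP-relative addressing ('wrt rip'), which keeps this code usable in
; its several different mappings.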

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
NAME(pMarker):
db 'Switch_marker'
%endif

;
; When we arrive here we're in 64-bit mode in the intermediate context
;
ALIGNCODE(16)
GLOBALNAME ICEnterTarget
    ; Load CPUM pointer into rdx
    mov     rdx, [NAME(pCpumIC) wrt rip]
    CPUMCPU_FROM_CPUM(edx)

    mov     rax, cs
    mov     ds, rax
    mov     es, rax

    ; Invalidate fs & gs
    mov     rax, 0
    mov     fs, rax
    mov     gs, rax

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 5
%endif

    ; Setup stack; use the lss_esp, ss pair for lss
    DEBUG_CHAR('7')
    mov     rsp, 0
    mov     eax, [rdx + CPUMCPU.Hyper.esp]
    mov     [rdx + CPUMCPU.Hyper.lss_esp], eax
    lss     esp, [rdx + CPUMCPU.Hyper.lss_esp]
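
    ; (lss loads ss:esp as a single far pointer, so the new stack segment
    ; and offset take effect together; the lss_esp/ss fields are evidently
    ; laid out back to back so they can serve as that m16:32 operand.)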

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 6
%endif

    ; load the hypervisor function address
    mov     r9, [rdx + CPUMCPU.Hyper.eip]

    ; Check if we need to restore the guest FPU state
    mov     esi, [rdx + CPUMCPU.fUseFlags]  ; esi == use flags.
    test    esi, CPUM_SYNC_FPU_STATE
    jz near gth_fpu_no

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 7
%endif

    mov     rax, cr0
    mov     rcx, rax                        ; save old CR0
    and     rax, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, rax
    fxrstor [rdx + CPUMCPU.Guest.fpu]
    mov     cr0, rcx                        ; and restore old CR0 again
    and     dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_FPU_STATE
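
    ; (CR0.TS and CR0.EM must both be clear while fxrstor executes, or the
    ; instruction would raise #NM respectively #UD; hence the
    ; save/clear/restore dance on CR0 above.)
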
gth_fpu_no:
    ; Check if we need to restore the guest debug state
    test    esi, CPUM_SYNC_DEBUG_STATE
    jz near gth_debug_no

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 8
%endif

    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 0*8]
    mov     dr0, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 1*8]
    mov     dr1, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 2*8]
    mov     dr2, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 3*8]
    mov     dr3, rax
    mov     rax, qword [rdx + CPUMCPU.Guest.dr + 6*8]
    mov     dr6, rax                        ; not required for AMD-V
    and     dword [rdx + CPUMCPU.fUseFlags], ~CPUM_SYNC_DEBUG_STATE

gth_debug_no:
%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 9
%endif

    ; parameter for all helper functions (pCtx)
    lea     rsi, [rdx + CPUMCPU.Guest.fpu]
    call    r9

    ; Load CPUM pointer into rdx
    mov     rdx, [NAME(pCpumIC) wrt rip]
    CPUMCPU_FROM_CPUM(edx)

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    mov     dword [rdx + CPUMCPU.Guest.dr + 4*8], 10
%endif

    ; Save the return code
    mov     dword [rdx + CPUMCPU.u32RetCode], eax

    ; now let's switch back
    jmp     NAME(VMMGCGuestToHostAsm)       ; rax = returncode.

ENDPROC vmmR0HostToGuestAsm


;;
; Trampoline for doing a call when starting hypervisor execution.
;
; Push any arguments to the routine.
; Push the argument frame size (cArg * 4).
; Push the call target (_cdecl convention).
; Push the address of this routine.
;
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmGCCallTrampoline
%ifdef DEBUG_STUFF
    COM64_S_CHAR 'c'
    COM64_S_CHAR 't'
    COM64_S_CHAR '!'
%endif
    int3
ENDPROC vmmGCCallTrampoline
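
; Note: for this switcher the trampoline body is just an int3 breakpoint,
; i.e. it is apparently not expected to be reached; the stack layout
; described above documents the intended calling convention all the same.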


;;
; The C interface.
;
BITS 64
ALIGNCODE(16)
BEGINPROC vmmGCGuestToHost
%ifdef DEBUG_STUFF
    push    rsi
    COM_NEWLINE
    DEBUG_CHAR('b')
    DEBUG_CHAR('a')
    DEBUG_CHAR('c')
    DEBUG_CHAR('k')
    DEBUG_CHAR('!')
    COM_NEWLINE
    pop     rsi
%endif
    int3
ENDPROC vmmGCGuestToHost


;;
; VMMGCGuestToHostAsm
;
; This is an alternative entry point which we'll be using
; when we have already saved the guest state, or haven't
; been messing with the guest at all.
;
; @param    eax     Return code.
; @uses     eax, edx, ecx (or it may use them in the future)
;
BITS 64
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsm
    ;; We're still in the intermediate memory context!
    ;;
    ;; Switch to compatibility mode, placing ourselves in identity mapped code.
    ;;
    jmp far [NAME(fpIDEnterTarget) wrt rip]

; 16:32 Pointer to IDEnterTarget.
NAME(fpIDEnterTarget):
    FIXUP FIX_ID_32BIT, 0, NAME(IDExitTarget) - NAME(Start)
    dd 0
    FIXUP FIX_HYPER_CS, 0
    dd 0
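
; The two dwords above form the m16:32 operand read by the far jmp: first
; the 32-bit offset, patched via FIX_ID_32BIT to point at IDExitTarget in
; the identity mapping, then the code selector, patched via FIX_HYPER_CS
; (only the low 16 bits of the second dword matter).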

; We're now on identity mapped pages!
ALIGNCODE(16)
GLOBALNAME IDExitTarget
BITS 32
    DEBUG_CHAR('1')

    ; 1. Deactivate long mode by turning off paging.
    mov     ebx, cr0
    and     ebx, ~X86_CR0_PG
    mov     cr0, ebx
    DEBUG_CHAR('2')

    ; 2. Load intermediate page table.
    FIXUP SWITCHER_FIX_INTER_CR3_HC, 1
    mov     edx, 0ffffffffh
    mov     cr3, edx
    DEBUG_CHAR('3')

    ; 3. Disable long mode.
    mov     ecx, MSR_K6_EFER
    rdmsr
    DEBUG_CHAR('5')
    and     eax, ~(MSR_K6_EFER_LME)
    wrmsr
    DEBUG_CHAR('6')

%ifndef NEED_PAE_ON_HOST
    ; 3b. Disable PAE.
    mov     eax, cr4
    and     eax, ~X86_CR4_PAE
    mov     cr4, eax
    DEBUG_CHAR('7')
%endif

    ; 4. Enable paging.
    or      ebx, X86_CR0_PG
    mov     cr0, ebx
    jmp short just_a_jump
just_a_jump:
    DEBUG_CHAR('8')
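
    ; (The short jump above is the conventional serializing step after
    ; toggling CR0.PG: it forces a fresh instruction fetch under the new
    ; translation before execution continues.)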

    ;;
    ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
    ;;
    FIXUP FIX_ID_2_HC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
    jmp near NAME(ICExitTarget)

    ;;
    ;; When we arrive at this label we're at the
    ;; intermediate mapping of the switching code.
    ;;
BITS 32
ALIGNCODE(16)
GLOBALNAME ICExitTarget
    DEBUG_CHAR('8')

    ; load the hypervisor data selector into ds & es
    FIXUP FIX_HYPER_DS, 1
    mov     eax, 0ffffh
    mov     ds, eax
    mov     es, eax

    FIXUP FIX_GC_CPUMCPU_OFF, 1, 0
    mov     edx, 0ffffffffh
    mov     esi, [edx + CPUMCPU.Host.cr3]
    mov     cr3, esi

    ;; now we're in host memory context, let's restore regs
    FIXUP FIX_HC_CPUM_OFF, 1, 0
    mov     edx, 0ffffffffh
    CPUMCPU_FROM_CPUM(edx)

    ; restore the host EFER
    mov     ebx, edx
    mov     ecx, MSR_K6_EFER
    mov     eax, [ebx + CPUMCPU.Host.efer]
    mov     edx, [ebx + CPUMCPU.Host.efer + 4]
    wrmsr
    mov     edx, ebx

    ; activate host gdt and idt
    lgdt    [edx + CPUMCPU.Host.gdtr]
    DEBUG_CHAR('0')
    lidt    [edx + CPUMCPU.Host.idtr]
    DEBUG_CHAR('1')

    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    movzx   eax, word [edx + CPUMCPU.Host.tr]       ; eax <- TR
    and     al, 0F8h                                ; mask away TI and RPL bits, get descriptor offset.
    add     eax, [edx + CPUMCPU.Host.gdtr + 2]      ; eax <- GDTR.address + descriptor offset.
    and     dword [eax + 4], ~0200h                 ; clear busy flag (2nd type2 bit)
    ltr     word [edx + CPUMCPU.Host.tr]
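
    ; (ltr faults if the descriptor's type is already 'busy TSS' and then
    ; marks it busy itself; clearing bit 9 of the descriptor's second dword
    ; above flips the type from busy back to available first.)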

    ; activate ldt
    DEBUG_CHAR('2')
    lldt    [edx + CPUMCPU.Host.ldtr]

    ; Restore segment registers
    mov     eax, [edx + CPUMCPU.Host.ds]
    mov     ds, eax
    mov     eax, [edx + CPUMCPU.Host.es]
    mov     es, eax
    mov     eax, [edx + CPUMCPU.Host.fs]
    mov     fs, eax
    mov     eax, [edx + CPUMCPU.Host.gs]
    mov     gs, eax

    ; restore stack
    lss     esp, [edx + CPUMCPU.Host.esp]

    ; Control registers.
    mov     ecx, [edx + CPUMCPU.Host.cr4]
    mov     cr4, ecx
    mov     ecx, [edx + CPUMCPU.Host.cr0]
    mov     cr0, ecx
    ;mov     ecx, [edx + CPUMCPU.Host.cr2]      ; restoring cr2 is assumed to be a waste of time.
    ;mov     cr2, ecx

    ; restore general registers.
    mov     edi, [edx + CPUMCPU.Host.edi]
    mov     esi, [edx + CPUMCPU.Host.esi]
    mov     ebx, [edx + CPUMCPU.Host.ebx]
    mov     ebp, [edx + CPUMCPU.Host.ebp]

    ; store the return code in eax
    mov     eax, [edx + CPUMCPU.u32RetCode]
    retf
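
    ; (This retf pairs with the 'push cs' + near call in vmmR0HostToGuest:
    ; the stack was given a cs:eip pair there, so the far return restores
    ; the caller's cs along with eip.)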
ENDPROC VMMGCGuestToHostAsm


;;
; VMMGCGuestToHostAsmHyperCtx
;
; This is an alternative entry point which we'll be using
; when we have the hypervisor context and need to save
; that before going to the host.
;
; This is typically useful when abandoning the hypervisor
; because of a trap, when we want the trap state to be saved.
;
; @param    eax     Return code.
; @param    ecx     Points to CPUMCTXCORE.
; @uses     eax, edx, ecx
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmHyperCtx
    int3
ENDPROC VMMGCGuestToHostAsmHyperCtx


;;
; VMMGCGuestToHostAsmGuestCtx
;
; Switches from Guest Context to Host Context.
; Of course it's only called from within the GC.
;
; @param    eax     Return code.
; @param    esp + 4 Pointer to CPUMCTXCORE.
;
; @remark   ASSUMES interrupts disabled.
;
ALIGNCODE(16)
BEGINPROC VMMGCGuestToHostAsmGuestCtx
    int3
ENDPROC VMMGCGuestToHostAsmGuestCtx


GLOBALNAME End

;
; The description string (in the text section).
;
NAME(Description):
    db SWITCHER_DESCRIPTION
    db 0

extern NAME(Relocate)

;
; End the fixup records.
;
BEGINDATA
    db FIX_THE_END                      ; final entry.
GLOBALNAME FixupsEnd


;;
; The switcher definition structure.
ALIGNDATA(16)
GLOBALNAME Def
    istruc VMMSWITCHERDEF
        at VMMSWITCHERDEF.pvCode,                       RTCCPTR_DEF NAME(Start)
        at VMMSWITCHERDEF.pvFixups,                     RTCCPTR_DEF NAME(Fixups)
        at VMMSWITCHERDEF.pszDesc,                      RTCCPTR_DEF NAME(Description)
        at VMMSWITCHERDEF.pfnRelocate,                  RTCCPTR_DEF NAME(Relocate)
        at VMMSWITCHERDEF.enmType,                      dd SWITCHER_TYPE
        at VMMSWITCHERDEF.cbCode,                       dd NAME(End)                         - NAME(Start)
        at VMMSWITCHERDEF.offR0HostToGuest,             dd NAME(vmmR0HostToGuest)            - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHost,             dd NAME(vmmGCGuestToHost)            - NAME(Start)
        at VMMSWITCHERDEF.offGCCallTrampoline,          dd NAME(vmmGCCallTrampoline)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsm,          dd NAME(VMMGCGuestToHostAsm)         - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmHyperCtx,  dd NAME(VMMGCGuestToHostAsmHyperCtx) - NAME(Start)
        at VMMSWITCHERDEF.offGCGuestToHostAsmGuestCtx,  dd NAME(VMMGCGuestToHostAsmGuestCtx) - NAME(Start)
        ; disasm help
        at VMMSWITCHERDEF.offHCCode0,                   dd 0
        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.offHCCode1,                   dd NAME(ICExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbHCCode1,                    dd NAME(End)                         - NAME(ICExitTarget)
        at VMMSWITCHERDEF.offIDCode0,                   dd NAME(IDEnterTarget)               - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode0,                    dd NAME(ICEnterTarget)               - NAME(IDEnterTarget)
        at VMMSWITCHERDEF.offIDCode1,                   dd NAME(IDExitTarget)                - NAME(Start)
        at VMMSWITCHERDEF.cbIDCode1,                    dd NAME(ICExitTarget)                - NAME(IDExitTarget)
        at VMMSWITCHERDEF.offGCCode,                    dd 0
        at VMMSWITCHERDEF.cbGCCode,                     dd 0
    iend