/* $Id$ */
/** @file
 * VBox - Page Manager / Monitor, Guest Paging Template.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#undef GSTPT
#undef PGSTPT
#undef GSTPTE
#undef PGSTPTE
#undef GSTPD
#undef PGSTPD
#undef GSTPDE
#undef PGSTPDE
#undef GST_BIG_PAGE_SIZE
#undef GST_BIG_PAGE_OFFSET_MASK
#undef GST_PDE_PG_MASK
#undef GST_PDE_BIG_PG_MASK
#undef GST_PD_SHIFT
#undef GST_PD_MASK
#undef GST_PTE_PG_MASK
#undef GST_PT_SHIFT
#undef GST_PT_MASK
#undef GST_TOTAL_PD_ENTRIES
#undef GST_CR3_PAGE_MASK
#undef GST_PDPE_ENTRIES
#undef GST_GET_PDE_BIG_PG_GCPHYS

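/*
 * Map the generic GST* type and constant names onto the concrete x86 paging
 * structures for the guest mode this template is instantiated for: the legacy
 * 32-bit layout (also used for real and protected mode without paging), or the
 * 64-bit entry layout shared by PAE and AMD64 guests.
 */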
#if PGM_GST_TYPE == PGM_TYPE_32BIT \
 || PGM_GST_TYPE == PGM_TYPE_REAL \
 || PGM_GST_TYPE == PGM_TYPE_PROT
# define GSTPT                      X86PT
# define PGSTPT                     PX86PT
# define GSTPTE                     X86PTE
# define PGSTPTE                    PX86PTE
# define GSTPD                      X86PD
# define PGSTPD                     PX86PD
# define GSTPDE                     X86PDE
# define PGSTPDE                    PX86PDE
# define GST_BIG_PAGE_SIZE          X86_PAGE_4M_SIZE
# define GST_BIG_PAGE_OFFSET_MASK   X86_PAGE_4M_OFFSET_MASK
# define GST_PDE_PG_MASK            X86_PDE_PG_MASK
# define GST_PDE_BIG_PG_MASK        X86_PDE4M_PG_MASK
# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst)  pgmGstGet4MBPhysPage(&pVM->pgm.s, PdeGst)
# define GST_PD_SHIFT               X86_PD_SHIFT
# define GST_PD_MASK                X86_PD_MASK
# define GST_TOTAL_PD_ENTRIES       X86_PG_ENTRIES
# define GST_PTE_PG_MASK            X86_PTE_PG_MASK
# define GST_PT_SHIFT               X86_PT_SHIFT
# define GST_PT_MASK                X86_PT_MASK
# define GST_CR3_PAGE_MASK          X86_CR3_PAGE_MASK

#elif PGM_GST_TYPE == PGM_TYPE_PAE \
   || PGM_GST_TYPE == PGM_TYPE_AMD64
# define GSTPT                      X86PTPAE
# define PGSTPT                     PX86PTPAE
# define GSTPTE                     X86PTEPAE
# define PGSTPTE                    PX86PTEPAE
# define GSTPD                      X86PDPAE
# define PGSTPD                     PX86PDPAE
# define GSTPDE                     X86PDEPAE
# define PGSTPDE                    PX86PDEPAE
# define GST_BIG_PAGE_SIZE          X86_PAGE_2M_SIZE
# define GST_BIG_PAGE_OFFSET_MASK   X86_PAGE_2M_OFFSET_MASK
# define GST_PDE_PG_MASK            X86_PDE_PAE_PG_MASK
# define GST_PDE_BIG_PG_MASK        X86_PDE2M_PAE_PG_MASK
# define GST_GET_PDE_BIG_PG_GCPHYS(PdeGst)  (PdeGst.u & GST_PDE_BIG_PG_MASK)
# define GST_PD_SHIFT               X86_PD_PAE_SHIFT
# define GST_PD_MASK                X86_PD_PAE_MASK
# if PGM_GST_TYPE == PGM_TYPE_PAE
#  define GST_TOTAL_PD_ENTRIES      (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)
#  define GST_PDPE_ENTRIES          X86_PG_PAE_PDPE_ENTRIES
# else
#  define GST_TOTAL_PD_ENTRIES      (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPE_ENTRIES)
#  define GST_PDPE_ENTRIES          X86_PG_AMD64_PDPE_ENTRIES
# endif
# define GST_PTE_PG_MASK            X86_PTE_PAE_PG_MASK
# define GST_PT_SHIFT               X86_PT_PAE_SHIFT
# define GST_PT_MASK                X86_PT_PAE_MASK
# define GST_CR3_PAGE_MASK          X86_CR3_PAE_PAGE_MASK
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
__BEGIN_DECLS
/* r3 */
PGM_GST_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR0);
PGM_GST_DECL(int, Enter)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, Relocate)(PVM pVM, RTGCUINTPTR offDelta);
PGM_GST_DECL(int, Exit)(PVM pVM);

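/* Physical write access handlers used to monitor writes to the guest CR3 page
   (the 32-bit page directory, respectively the PAE PDPT). */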
static DECLCALLBACK(int) pgmR3Gst32BitWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
#if 0
static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerPD(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
#endif

/* all */
PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCUINTPTR GCPtr, PX86PDEPAE pPDE);
PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, UnmapCR3)(PVM pVM);
PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM);
__END_DECLS


/**
 * Initializes the guest bit of the paging mode data.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pModeData       The pointer to the paging mode data to initialize.
 * @param   fResolveGCAndR0 Indicate whether or not GC and Ring-0 symbols can be resolved now.
 *                          This is used early in the init process to avoid trouble with PDM
 *                          not being initialized yet.
 */
PGM_GST_DECL(int, InitData)(PVM pVM, PPGMMODEDATA pModeData, bool fResolveGCAndR0)
{
    Assert(pModeData->uGstType == PGM_GST_TYPE);

    /* Ring-3 */
    pModeData->pfnR3GstRelocate             = PGM_GST_NAME(Relocate);
    pModeData->pfnR3GstExit                 = PGM_GST_NAME(Exit);
    pModeData->pfnR3GstGetPDE               = PGM_GST_NAME(GetPDE);
    pModeData->pfnR3GstGetPage              = PGM_GST_NAME(GetPage);
    pModeData->pfnR3GstModifyPage           = PGM_GST_NAME(ModifyPage);
    pModeData->pfnR3GstMapCR3               = PGM_GST_NAME(MapCR3);
    pModeData->pfnR3GstUnmapCR3             = PGM_GST_NAME(UnmapCR3);
    pModeData->pfnR3GstMonitorCR3           = PGM_GST_NAME(MonitorCR3);
    pModeData->pfnR3GstUnmonitorCR3         = PGM_GST_NAME(UnmonitorCR3);

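    /* The guest CR3 write handlers are only used for 32-bit and PAE guests;
       the remaining guest modes have nothing to monitor here. */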
#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
    pModeData->pfnR3GstWriteHandlerCR3      = PGM_GST_NAME(WriteHandlerCR3);
    pModeData->pszR3GstWriteHandlerCR3      = "Guest CR3 Write access handler";
    pModeData->pfnR3GstPAEWriteHandlerCR3   = PGM_GST_NAME(WriteHandlerCR3);
    pModeData->pszR3GstPAEWriteHandlerCR3   = "Guest CR3 Write access handler (PAE)";
#else
    pModeData->pfnR3GstWriteHandlerCR3      = NULL;
    pModeData->pszR3GstWriteHandlerCR3      = NULL;
    pModeData->pfnR3GstPAEWriteHandlerCR3   = NULL;
    pModeData->pszR3GstPAEWriteHandlerCR3   = NULL;
#endif

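    /* The raw-mode context (RC) and ring-0 workers are resolved by symbol name,
       which requires the PDM loader to be up; hence the fResolveGCAndR0 flag. */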
    if (fResolveGCAndR0)
    {
        int rc;

#if PGM_SHW_TYPE != PGM_TYPE_AMD64 /* No AMD64 for traditional virtualization, only VT-x and AMD-V. */
        /* GC */
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(GetPage),       &pModeData->pfnRCGstGetPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_RC_STR(GetPage), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(ModifyPage),    &pModeData->pfnRCGstModifyPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_RC_STR(ModifyPage), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(GetPDE),        &pModeData->pfnRCGstGetPDE);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_RC_STR(GetPDE), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(MonitorCR3),    &pModeData->pfnRCGstMonitorCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_RC_STR(MonitorCR3), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(UnmonitorCR3),  &pModeData->pfnRCGstUnmonitorCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_RC_STR(UnmonitorCR3), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(MapCR3),        &pModeData->pfnRCGstMapCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_RC_STR(MapCR3), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(UnmapCR3),      &pModeData->pfnRCGstUnmapCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_RC_STR(UnmapCR3), rc), rc);
# if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(WriteHandlerCR3), &pModeData->pfnRCGstWriteHandlerCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_RC_STR(WriteHandlerCR3), rc), rc);
        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(WriteHandlerCR3), &pModeData->pfnRCGstPAEWriteHandlerCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_RC_STR(WriteHandlerCR3), rc), rc);
# endif
#endif /* Not AMD64 shadow paging. */

        /* Ring-0 */
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(GetPage),       &pModeData->pfnR0GstGetPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(GetPage), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(ModifyPage),    &pModeData->pfnR0GstModifyPage);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(ModifyPage), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(GetPDE),        &pModeData->pfnR0GstGetPDE);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(GetPDE), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(MonitorCR3),    &pModeData->pfnR0GstMonitorCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(MonitorCR3), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(UnmonitorCR3),  &pModeData->pfnR0GstUnmonitorCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(UnmonitorCR3), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(MapCR3),        &pModeData->pfnR0GstMapCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(MapCR3), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(UnmapCR3),      &pModeData->pfnR0GstUnmapCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(UnmapCR3), rc), rc);
#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(WriteHandlerCR3), &pModeData->pfnR0GstWriteHandlerCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(WriteHandlerCR3), rc), rc);
        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(WriteHandlerCR3), &pModeData->pfnR0GstPAEWriteHandlerCR3);
        AssertMsgRCReturn(rc, ("%s -> rc=%Vrc\n", PGM_GST_NAME_R0_STR(WriteHandlerCR3), rc), rc);
#endif
    }

    return VINF_SUCCESS;
}


/**
 * Enters the guest mode.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPhysCR3   The physical address from the CR3 register.
 */
PGM_GST_DECL(int, Enter)(PVM pVM, RTGCPHYS GCPhysCR3)
{
    /*
     * Map and monitor CR3
     */
    int rc = PGM_GST_NAME(MapCR3)(pVM, GCPhysCR3);
    if (VBOX_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
        rc = PGM_GST_NAME(MonitorCR3)(pVM, GCPhysCR3);
    return rc;
}


/**
 * Relocate any GC pointers related to guest mode paging.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   offDelta    The relocation offset.
 */
PGM_GST_DECL(int, Relocate)(PVM pVM, RTGCUINTPTR offDelta)
{
    /* nothing special to do here - InitData does the job. */
    return VINF_SUCCESS;
}


/**
 * Exits the guest mode.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 */
PGM_GST_DECL(int, Exit)(PVM pVM)
{
    int rc = PGM_GST_NAME(UnmonitorCR3)(pVM);
    if (VBOX_SUCCESS(rc))
        rc = PGM_GST_NAME(UnmapCR3)(pVM);
    return rc;
}


#if PGM_GST_TYPE == PGM_TYPE_32BIT
/**
 * Physical write access handler for the Guest CR3 in 32-bit mode.
 *
 * @returns VINF_SUCCESS if the handler has carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPhys          The physical address the guest is writing to.
 * @param   pvPhys          The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   pvUser          User argument.
 */
static DECLCALLBACK(int) pgmR3Gst32BitWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
    Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    Log2(("pgmR3Gst32BitWriteHandlerCR3: ff=%#x GCPhys=%VGp pvPhys=%p cbBuf=%d pvBuf={%.*Vhxs}\n", pVM->fForcedActions, GCPhys, pvPhys, cbBuf, cbBuf, pvBuf));

    /*
     * Do the write operation.
     */
    memcpy(pvPhys, pvBuf, cbBuf);
    if (    !pVM->pgm.s.fMappingsFixed
        &&  !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        /*
         * Check for conflicts.
         */
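        /* The write can touch at most two consecutive PDEs; if either one is
           present and overlaps a VMM mapping, schedule a CR3 resync. */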
        const RTGCUINTPTR offPD = GCPhys & PAGE_OFFSET_MASK;
        const unsigned iPD1 = offPD / sizeof(X86PDE);
        const unsigned iPD2 = (offPD + cbBuf - 1) / sizeof(X86PDE);
        Assert(iPD2 - iPD1 <= 1);
        if (    (   pVM->pgm.s.pGuestPDHC->a[iPD1].n.u1Present
                 && pgmGetMapping(pVM, iPD1 << X86_PD_SHIFT) )
            ||  (   iPD1 != iPD2
                 && pVM->pgm.s.pGuestPDHC->a[iPD2].n.u1Present
                 && pgmGetMapping(pVM, iPD2 << X86_PD_SHIFT) )
           )
        {
            Log(("pgmR3Gst32BitWriteHandlerCR3: detected conflict. iPD1=%#x iPD2=%#x GCPhys=%VGp\n", iPD1, iPD2, GCPhys));
            STAM_COUNTER_INC(&pVM->pgm.s.StatR3GuestPDWriteConflict);
            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
        }
    }

    STAM_COUNTER_INC(&pVM->pgm.s.StatR3GuestPDWrite);
    return VINF_SUCCESS;
}
#endif /* 32BIT */

#if PGM_GST_TYPE == PGM_TYPE_PAE

/**
 * Physical write access handler for the Guest CR3 in PAE mode.
 *
 * @returns VINF_SUCCESS if the handler has carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPhys          The physical address the guest is writing to.
 * @param   pvPhys          The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   pvUser          User argument.
 */
static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
    Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    Log2(("pgmR3GstPAEWriteHandlerCR3: ff=%#x GCPhys=%VGp pvPhys=%p cbBuf=%d pvBuf={%.*Vhxs}\n", pVM->fForcedActions, GCPhys, pvPhys, cbBuf, cbBuf, pvBuf));

    /*
     * Do the write operation.
     */
    memcpy(pvPhys, pvBuf, cbBuf);
    if (    !pVM->pgm.s.fMappingsFixed
        &&  !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        /*
         * Check if any of the PDs have changed.
         * We'll simply check all of them instead of figuring out which one/two to check.
         */
        for (unsigned i = 0; i < 4; i++)
        {
            if (    pVM->pgm.s.pGstPaePDPTHC->a[i].n.u1Present
                &&  (pVM->pgm.s.pGstPaePDPTHC->a[i].u & X86_PDPE_PG_MASK) != pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
            {
                Log(("pgmR3GstPAEWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%VGp\n",
                     i, pVM->pgm.s.pGstPaePDPTHC->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
                /*
                 * The PD has changed.
                 * We will schedule a monitoring update for the next TLB Flush,
                 * InvalidatePage or SyncCR3.
                 *
                 * This isn't perfect, because a lazy page sync might be dealing with a
                 * half-updated PDPE. However, we assume that the guest OS is disabling
                 * interrupts and being extremely careful (cmpxchg8b) when updating a
                 * PDPE where it's executing.
                 */
                pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
            }
        }
    }
    /*
     * Flag an update of the monitoring at the next crossroads so we don't monitor the
     * wrong pages for so long that they can be reused as code pages and freak out
     * the recompiler or something.
     */
    else
        pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;


    STAM_COUNTER_INC(&pVM->pgm.s.StatR3GuestPDWrite);
    return VINF_SUCCESS;
}

# if 0
/**
 * Physical write access handler for the Guest PAE page directories.
 *
 * @returns VINF_SUCCESS if the handler has carried out the operation.
 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
 * @param   pVM             VM Handle.
 * @param   GCPhys          The physical address the guest is writing to.
 * @param   pvPhys          The HC mapping of that address.
 * @param   pvBuf           What the guest is reading/writing.
 * @param   cbBuf           How much it's reading/writing.
 * @param   enmAccessType   The access type.
 * @param   pvUser          User argument.
 */
static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerPD(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
{
    AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
    Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    Log2(("pgmR3GstPAEWriteHandlerPD: ff=%#x GCPhys=%VGp pvPhys=%p cbBuf=%d pvBuf={%.*Vhxs}\n", pVM->fForcedActions, GCPhys, pvPhys, cbBuf, cbBuf, pvBuf));

    /*
     * Do the write operation.
     */
    memcpy(pvPhys, pvBuf, cbBuf);
if ( !pVM->pgm.s.fMappingsFixed
&& !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
{
/*
* Figure out which of the 4 PDs this is.
*/
unsigned i;
for (i = 0; i < 4; i++)
            if ((pVM->pgm.s.pGstPaePDPTHC->a[i].u & X86_PDPE_PG_MASK) == (GCPhys & X86_PTE_PAE_PG_MASK))
{
PX86PDPAE pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPT_SHIFT);
const RTGCUINTPTR offPD = GCPhys & PAGE_OFFSET_MASK;
const unsigned iPD1 = offPD / sizeof(X86PDEPAE);
const unsigned iPD2 = (offPD + cbBuf - 1) / sizeof(X86PDEPAE);
                Assert(iPD2 - iPD1 <= 1);
if ( ( pPDSrc->a[iPD1].n.u1Present
&& pgmGetMapping(pVM, (i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)) )
|| ( iPD1 != iPD2
&& pPDSrc->a[iPD2].n.u1Present
&& pgmGetMapping(pVM, (i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)) )
)
{
                    Log(("pgmR3GstPAEWriteHandlerPD: detected conflict. i=%d iPD1=%#x iPD2=%#x GCPhys=%VGp\n",
i, iPD1, iPD2, GCPhys));
STAM_COUNTER_INC(&pVM->pgm.s.StatR3GuestPDWriteConflict);
VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
}
break; /* ASSUMES no duplicate entries... */
}
Assert(i < 4);
}
STAM_COUNTER_INC(&pVM->pgm.s.StatR3GuestPDWrite);
return VINF_SUCCESS;
}
# endif
#endif /* PAE */