/* $Id$ */
/** @file
 * PGM - Page Manager and Monitor, Ring-0.
 */

/*
 * Copyright (C) 2007-2011 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/rawpci.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/gmm.h>
#include <VBox/vmm/gvm.h>
#include "PGMInternal.h"
#include <VBox/vmm/vm.h>
#include "PGMInline.h"
#include <VBox/log.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <iprt/mem.h>


/*
 * Instantiate the ring-0 header/code templates.
 */
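/* Note: only the protected-mode-without-paging guest variants are instantiated
   here; the nested-paging #PF handler further down pretends the guest runs in
   protected mode without paging, so the other guest modes are not needed in
   ring-0. */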
#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME

#define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
#include "PGMR0Bth.h"
#undef PGM_BTH_NAME


/**
 * Worker function for PGMR3PhysAllocateHandyPages and pgmPhysEnsureHandyPage.
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success. FF cleared.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory. The FF is set in this case.
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0DECL(int) PGMR0PhysAllocateHandyPages(PVM pVM, PVMCPU pVCpu)
{
    Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu));

    /*
     * Check for error injection.
     */
    if (RT_UNLIKELY(pVM->pgm.s.fErrInjHandyPages))
        return VERR_NO_MEMORY;

    /*
     * Try to allocate a full set of handy pages.
     */
    uint32_t iFirst = pVM->pgm.s.cHandyPages;
    AssertReturn(iFirst <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), VERR_INTERNAL_ERROR);
    uint32_t cPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages) - iFirst;
    if (!cPages)
        return VINF_SUCCESS;
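    /* Ask GMM to fill in the remainder of the array in one go. (The two
       identical counts are presumably the pages to update and the pages to
       allocate; see GMMR0AllocateHandyPages for the authoritative parameter
       semantics.) */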
    int rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, cPages, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
    if (RT_SUCCESS(rc))
    {
        for (uint32_t i = 0; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
        {
            Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
            Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
            Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
            Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
        }

        pVM->pgm.s.cHandyPages = RT_ELEMENTS(pVM->pgm.s.aHandyPages);
    }
    else if (rc != VERR_GMM_SEED_ME)
    {
        if (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
            && iFirst < PGM_HANDY_PAGES_MIN)
        {

#ifdef VBOX_STRICT
            /* We're ASSUMING that GMM has updated all the entries before failing us. */
            uint32_t i;
            for (i = iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
            {
                Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
            }
#endif

            /*
             * Reduce the number of pages until we hit the minimum limit.
             */
            do
            {
                cPages >>= 1;
                if (cPages + iFirst < PGM_HANDY_PAGES_MIN)
                    cPages = PGM_HANDY_PAGES_MIN - iFirst;
                rc = GMMR0AllocateHandyPages(pVM, pVCpu->idCpu, 0, cPages, &pVM->pgm.s.aHandyPages[iFirst]);
            } while (   (   rc == VERR_GMM_HIT_GLOBAL_LIMIT
                         || rc == VERR_GMM_HIT_VM_ACCOUNT_LIMIT)
                     && cPages + iFirst > PGM_HANDY_PAGES_MIN);
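
            /* On success only the first iFirst + cPages entries are valid;
               the strict-build asserts below check both the valid entries and
               the untouched tail of the array. */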
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_STRICT
                i = iFirst + cPages;
                while (i-- > 0)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage != NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idPage <= GMM_PAGEID_LAST);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys != NIL_RTHCPHYS);
                    Assert(!(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
                }

                for (i = cPages + iFirst; i < RT_ELEMENTS(pVM->pgm.s.aHandyPages); i++)
                {
                    Assert(pVM->pgm.s.aHandyPages[i].idPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].idSharedPage == NIL_GMM_PAGEID);
                    Assert(pVM->pgm.s.aHandyPages[i].HCPhysGCPhys == NIL_RTHCPHYS);
                }
#endif

                pVM->pgm.s.cHandyPages = iFirst + cPages;
            }
        }

        if (RT_FAILURE(rc) && rc != VERR_GMM_SEED_ME)
        {
            LogRel(("PGMR0PhysAllocateHandyPages: rc=%Rrc iFirst=%d cPages=%d\n", rc, iFirst, cPages));
            VM_FF_SET(pVM, VM_FF_PGM_NO_MEMORY);
        }
    }


    LogFlow(("PGMR0PhysAllocateHandyPages: cPages=%d rc=%Rrc\n", cPages, rc));
    return rc;
}


/**
 * Worker function for PGMR3PhysAllocateLargeHandyPage
 *
 * @returns The following VBox status codes.
 * @retval  VINF_SUCCESS on success.
 * @retval  VINF_EM_NO_MEMORY if we're out of memory.
 *
 * @param   pVM     The VM handle.
 * @param   pVCpu   The VMCPU handle.
 *
 * @remarks Must be called from within the PGM critical section. The caller
 *          must clear the new pages.
 */
VMMR0DECL(int) PGMR0PhysAllocateLargeHandyPage(PVM pVM, PVMCPU pVCpu)
{
    Assert(PDMCritSectIsOwnerEx(&pVM->pgm.s.CritSect, pVCpu));

    Assert(!pVM->pgm.s.cLargeHandyPages);
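    /* A single 2 MB page is requested; PGM keeps at most one pending large
       handy page at a time, hence the single array entry and the count of 1
       set below. */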
    int rc = GMMR0AllocateLargePage(pVM, pVCpu->idCpu, _2M, &pVM->pgm.s.aLargeHandyPage[0].idPage, &pVM->pgm.s.aLargeHandyPage[0].HCPhysGCPhys);
    if (RT_SUCCESS(rc))
        pVM->pgm.s.cLargeHandyPages = 1;

    return rc;
}


#ifdef VBOX_WITH_PCI_PASSTHROUGH
/* Interface sketch. The interface belongs to a global PCI pass-through
   manager. It shall use the global VM handle, not the user VM handle to
   store the per-VM info (domain) since that is all ring-0 stuff, thus
   passing pGVM here. I've tentatively prefixed the functions 'GPciRawR0',
   we can discuss the PciRaw code re-organization when I'm back from
   vacation.

   I've implemented the initial IOMMU set up below. For things to work
   reliably, we will probably need to add a whole bunch of checks and
   GPciRawR0GuestPageUpdate calls to the PGM code. For the present,
   assuming nested paging (enforced) and prealloc (enforced), no
   ballooning (check missing), page sharing (check missing) or live
   migration (check missing), it might work fine. At least if some
   VM power-off hook is present and can tear down the IOMMU page tables. */

/**
 * Tells the global PCI pass-through manager that we are about to set up the
 * guest page to host page mappings for the specified VM.
 *
 * @returns VBox status code.
 *
 * @param   pGVM    The ring-0 VM structure.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageBeginAssignments(PGVM pGVM)
{
    return VINF_SUCCESS;
}


/**
 * Assigns a host page mapping for a guest page.
 *
 * This is only used when setting up the mappings, i.e. between
 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM structure.
 * @param   GCPhys  The address of the guest page (page aligned).
 * @param   HCPhys  The address of the host page (page aligned).
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageAssign(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);
    AssertReturn(!(HCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);

    if (pGVM->rawpci.s.pfnContigMemInfo)
        /** @todo: what do we do on failure? */
        pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, HCPhys, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_MAP);

    return VINF_SUCCESS;
}

/**
 * Indicates that the specified guest page either doesn't exist or doesn't have
 * a host page mapping we trust PCI pass-through with.
 *
 * This is only used when setting up the mappings, i.e. between
 * GPciRawR0GuestPageBeginAssignments and GPciRawR0GuestPageEndAssignments.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM structure.
 * @param   GCPhys  The address of the guest page (page aligned).
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageUnassign(PGVM pGVM, RTGCPHYS GCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_3);

    if (pGVM->rawpci.s.pfnContigMemInfo)
        /** @todo: what do we do on failure? */
        pGVM->rawpci.s.pfnContigMemInfo(&pGVM->rawpci.s, 0, GCPhys, PAGE_SIZE, PCIRAW_MEMINFO_UNMAP);

    return VINF_SUCCESS;
}


/**
 * Tells the global PCI pass-through manager that we have completed setting up
 * the guest page to host page mappings for the specified VM.
 *
 * This complements GPciRawR0GuestPageBeginAssignments and will be called even
 * if some page assignment failed.
 *
 * @returns VBox status code.
 *
 * @param   pGVM    The ring-0 VM structure.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageEndAssignments(PGVM pGVM)
{
    return VINF_SUCCESS;
}


/**
 * Tells the global PCI pass-through manager that a guest page mapping has
 * changed after the initial setup.
 *
 * @returns VBox status code.
 * @param   pGVM    The ring-0 VM structure.
 * @param   GCPhys  The address of the guest page (page aligned).
 * @param   HCPhys  The new host page address or NIL_RTHCPHYS if
 *                  now unassigned.
 */
VMMR0_INT_DECL(int) GPciRawR0GuestPageUpdate(PGVM pGVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys)
{
    AssertReturn(!(GCPhys & PAGE_OFFSET_MASK), VERR_INTERNAL_ERROR_4);
    AssertReturn(!(HCPhys & PAGE_OFFSET_MASK) || HCPhys == NIL_RTHCPHYS, VERR_INTERNAL_ERROR_4);
    return VINF_SUCCESS;
}

#endif /* VBOX_WITH_PCI_PASSTHROUGH */

/**
 * Sets up the IOMMU when a raw PCI device is enabled.
 *
 * @note    This is a hack that will probably be remodelled and refined later!
 *
 * @returns VBox status code.
 *
 * @param   pVM     The VM handle.
 */
VMMR0_INT_DECL(int) PGMR0PhysSetupIommu(PVM pVM)
{
    PGVM pGVM;
    int rc = GVMMR0ByVM(pVM, &pGVM);
    if (RT_FAILURE(rc))
        return rc;

#ifdef VBOX_WITH_PCI_PASSTHROUGH
    if (pVM->pgm.s.fPciPassthrough)
    {
        /*
         * The Simplistic Approach - Enumerate all the pages and tell the
         * IOMMU about each of them.
         */
        pgmLock(pVM);
        rc = GPciRawR0GuestPageBeginAssignments(pGVM);
        if (RT_SUCCESS(rc))
        {
            for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)
            {
                PPGMPAGE pPage  = &pRam->aPages[0];
                RTGCPHYS GCPhys = pRam->GCPhys;
                uint32_t cLeft  = pRam->cb >> PAGE_SHIFT;
                while (cLeft-- > 0)
                {
                    /* Only expose pages that are 100% safe for now. */
                    if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
                        && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED
                        && !PGM_PAGE_HAS_ANY_HANDLERS(pPage))
                        rc = GPciRawR0GuestPageAssign(pGVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage));
                    else
                        rc = GPciRawR0GuestPageUnassign(pGVM, GCPhys);

                    /* next */
                    pPage++;
                    GCPhys += PAGE_SIZE;
                }
            }

            int rc2 = GPciRawR0GuestPageEndAssignments(pGVM);
            if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
                rc = rc2;
        }
        pgmUnlock(pVM);
    }
    else
#endif
        rc = VERR_NOT_SUPPORTED;
    return rc;
}


/**
 * #PF Handler for nested paging.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 VM Handle.
 * @param   pVCpu               VMCPU Handle.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   uErr                The trap error code.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 */
VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr,
                                              PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault)
{
    int rc;
    LogFlow(("PGMTrap0eHandler: uErr=%RGx GCPhysFault=%RGp eip=%RGv\n", uErr, GCPhysFault, (RTGCPTR)pRegFrame->rip));
    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );

    /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
    AssertMsg(   enmShwPagingMode == PGMMODE_32_BIT || enmShwPagingMode == PGMMODE_PAE     || enmShwPagingMode == PGMMODE_PAE_NX
              || enmShwPagingMode == PGMMODE_AMD64  || enmShwPagingMode == PGMMODE_AMD64_NX || enmShwPagingMode == PGMMODE_EPT,
              ("enmShwPagingMode=%d\n", enmShwPagingMode));

    /* Reserved shouldn't end up here. */
    Assert(!(uErr & X86_TRAP_PF_RSVD));
#ifdef VBOX_WITH_STATISTICS
    /*
     * Error code stats.
     */
    if (uErr & X86_TRAP_PF_US)
    {
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSWrite);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSReserved);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSNXE);
        else
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eUSRead);
    }
    else
    {   /* Supervisor */
        if (!(uErr & X86_TRAP_PF_P))
        {
            if (uErr & X86_TRAP_PF_RW)
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentWrite);
            else
                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVNotPresentRead);
        }
        else if (uErr & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVWrite);
        else if (uErr & X86_TRAP_PF_ID)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSNXE);
        else if (uErr & X86_TRAP_PF_RSVD)
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eSVReserved);
    }
#endif

    /*
     * Call the worker.
     *
     * Note! We pretend the guest is in protected mode without paging, so we
     *       can use existing code to build the nested page tables.
     */
    bool fLockTaken = false;
    switch (enmShwPagingMode)
    {
        case PGMMODE_32_BIT:
            rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_PAE:
        case PGMMODE_PAE_NX:
            rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_AMD64:
        case PGMMODE_AMD64_NX:
            rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        case PGMMODE_EPT:
            rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVCpu, uErr, pRegFrame, GCPhysFault, &fLockTaken);
            break;
        default:
            AssertFailed();
            rc = VERR_INVALID_PARAMETER;
            break;
    }
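    /* The mode-specific Trap0eHandler worker may return with the PGM lock
       still held; release it here before we leave. */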
    if (fLockTaken)
    {
        PGM_LOCK_ASSERT_OWNER(pVM);
        pgmUnlock(pVM);
    }

    if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
        rc = VINF_SUCCESS;
    /* Note: hack alert for a difficult-to-reproduce problem. */
    else if (   rc == VERR_PAGE_NOT_PRESENT                 /* SMP only ; disassembly might fail. */
             || rc == VERR_PAGE_TABLE_NOT_PRESENT           /* seen with UNI & SMP */
             || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT   /* seen with SMP */
             || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)     /* precaution */
    {
        Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGp error code %x (rip=%RGv)\n", rc, GCPhysFault, uErr, pRegFrame->rip));
        /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about
           single VCPU VMs though. */
        rc = VINF_SUCCESS;
    }

    STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
                     pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Misc; });
    STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
    return rc;
}


/**
 * #PF Handler for deliberate nested paging misconfiguration (/reserved bit)
 * employed for MMIO pages.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM                 The VM Handle.
 * @param   pVCpu               The current CPU.
 * @param   enmShwPagingMode    Paging mode for the nested page tables.
 * @param   pRegFrame           Trap register frame.
 * @param   GCPhysFault         The fault address.
 * @param   uErr                The error code, UINT32_MAX if not available
 *                              (VT-x).
 */
VMMR0DECL(VBOXSTRICTRC) PGMR0Trap0eHandlerNPMisconfig(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode,
                                                      PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, uint32_t uErr)
{
#ifdef PGM_WITH_MMIO_OPTIMIZATIONS
    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    VBOXSTRICTRC rc;

    /*
     * Try to look up the all-access physical handler for the address.
     */
    pgmLock(pVM);
    PPGMPHYSHANDLER pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
    if (RT_LIKELY(pHandler && pHandler->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE))
    {
        /*
         * If the handler has aliased pages or pages that have been temporarily
         * disabled, we'll have to take a detour to make sure we resync them
         * to avoid lots of unnecessary exits.
         */
        PPGMPAGE pPage;
        if (   (   pHandler->cAliasedPages
                || pHandler->cTmpOffPages)
            && (   (pPage = pgmPhysGetPage(pVM, GCPhysFault)) == NULL
                || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
           )
        {
            Log(("PGMR0Trap0eHandlerNPMisconfig: Resyncing aliases / tmp-off page at %RGp (uErr=%#x) %R[pgmpage]\n", GCPhysFault, uErr, pPage));
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
            rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
            pgmUnlock(pVM);
        }
        else
        {
            if (pHandler->CTX_SUFF(pfnHandler))
            {
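                /* Cache the handler and its user argument while still holding
                   the PGM lock, then drop the lock before invoking the handler. */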
                CTX_MID(PFNPGM,PHYSHANDLER) pfnHandler = pHandler->CTX_SUFF(pfnHandler);
                void *pvUser = pHandler->CTX_SUFF(pvUser);
                STAM_PROFILE_START(&pHandler->Stat, h);
                pgmUnlock(pVM);

                Log6(("PGMR0Trap0eHandlerNPMisconfig: calling %p(,%#x,,%RGp,%p)\n", pfnHandler, uErr, GCPhysFault, pvUser));
                rc = pfnHandler(pVM, uErr == UINT32_MAX ? RTGCPTR_MAX : uErr, pRegFrame, GCPhysFault, GCPhysFault, pvUser);

#ifdef VBOX_WITH_STATISTICS
                pgmLock(pVM);
                pHandler = pgmHandlerPhysicalLookup(pVM, GCPhysFault);
                if (pHandler)
                    STAM_PROFILE_STOP(&pHandler->Stat, h);
                pgmUnlock(pVM);
#endif
            }
            else
            {
                pgmUnlock(pVM);
                Log(("PGMR0Trap0eHandlerNPMisconfig: %RGp (uErr=%#x) -> R3\n", GCPhysFault, uErr));
                rc = VINF_EM_RAW_EMULATE_INSTR;
            }
        }
    }
    else
    {
        /*
         * Must be out of sync, so do a SyncPage and restart the instruction.
         *
         * ASSUMES that ALL handlers are page aligned and cover whole pages
         * (assumption asserted in PGMHandlerPhysicalRegisterEx).
         */
        Log(("PGMR0Trap0eHandlerNPMisconfig: Out of sync page at %RGp (uErr=%#x)\n", GCPhysFault, uErr));
        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfgSyncPage);
        rc = pgmShwSyncNestedPageLocked(pVCpu, GCPhysFault, 1 /*cPages*/, enmShwPagingMode);
        pgmUnlock(pVM);
    }

    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0NpMiscfg, a);
    return rc;

#else
    AssertLogRelFailed();
    return VERR_INTERNAL_ERROR_4;
#endif
}