PGMAllMap.cpp revision d1a7777830e66f060c61e169b7ec4353bdfbf3e9
/* $Id$ */
/** @file
 * PGM - Page Manager and Monitor - All context code.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <VBox/err.h>


/**
 * Maps a range of physical pages at a given virtual address
 * in the guest context.
 *
 * The GC virtual address range must be within an existing mapping.
 *
 * @returns VBox status code.
 * @param   pVM         The virtual machine.
 * @param   GCPtr       Where to map the page(s). Must be page aligned.
 * @param   HCPhys      Start of the range of physical pages. Must be page aligned.
 * @param   cbPages     Number of bytes to map. Must be page aligned.
 * @param   fFlags      Page flags (X86_PTE_*).
 */
VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
{
    AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));

    /*
     * Validate input.
     */
    AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
    AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
    AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));

    /* hypervisor defaults */
    if (!fFlags)
        fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        if (GCPtr - pCur->GCPtr < pCur->cb)
        {
            if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
            {
                AssertMsgFailed(("Invalid range!!\n"));
                return VERR_INVALID_PARAMETER;
            }

            /*
             * Setup PTE.
             */
            X86PTEPAE Pte;
            Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);

            /*
             * Update the page tables.
             */
            for (;;)
            {
                RTGCUINTPTR off = GCPtr - pCur->GCPtr;
                const unsigned iPT = off >> X86_PD_SHIFT;
                const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;

                /* 32-bit */
                pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u;      /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */

                /* pae */
                pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512].u = Pte.u;

                /* next */
                cbPages -= PAGE_SIZE;
                if (!cbPages)
                    break;
                GCPtr += PAGE_SIZE;
                Pte.u += PAGE_SIZE;
            }

            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n", GCPtr));
    return VERR_INVALID_PARAMETER;
}
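
/*
 * Usage sketch (placeholder values, not taken from a real call site): map four
 * host pages into an existing hypervisor mapping, letting fFlags = 0 pick the
 * hypervisor defaults (present, accessed, dirty) applied above. GCPtrArea
 * stands for a page-aligned GC address inside a registered mapping and
 * HCPhysPages for a page-aligned host-physical address.
 *
 *      int rc = PGMMap(pVM, GCPtrArea, HCPhysPages, 4 * PAGE_SIZE, 0);
 *      AssertRCReturn(rc, rc);
 */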


/**
 * Sets (replaces) the page flags for a range of pages in a mapping.
 *
 * @returns VBox status.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      Page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapSetPage(PVM pVM, RTGCPTR GCPtr, uint64_t cb, uint64_t fFlags)
{
    return PGMMapModifyPage(pVM, GCPtr, cb, fFlags, 0);
}


/**
 * Modify page flags for a range of pages in a mapping.
 *
 * The existing flags are ANDed with the fMask and ORed with the fFlags.
 *
 * @returns VBox status code.
 * @param   pVM         VM handle.
 * @param   GCPtr       Virtual address of the first page in the range.
 * @param   cb          Size (in bytes) of the range to apply the modification to.
 * @param   fFlags      The OR mask - page flags X86_PTE_*, excluding the page mask of course.
 * @param   fMask       The AND mask - page flags X86_PTE_*, excluding the page mask of course.
 */
VMMDECL(int) PGMMapModifyPage(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
{
    /*
     * Validate input.
     */
    AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#x\n", fFlags));
    Assert(cb);

    /*
     * Align the input.
     */
    cb    += (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
    cb     = RT_ALIGN_Z(cb, PAGE_SIZE);
    GCPtr  = (RTGCPTR)((RTGCUINTPTR)GCPtr & PAGE_BASE_GC_MASK);

    /*
     * Find the mapping.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        RTGCUINTPTR off = (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pCur->GCPtr;
        if (off < pCur->cb)
        {
            AssertMsgReturn(off + cb <= pCur->cb,
                            ("Invalid page range %#x LB%#x. mapping '%s' %#x to %#x\n",
                             GCPtr, cb, pCur->pszDesc, pCur->GCPtr, pCur->GCPtrLast),
                            VERR_INVALID_PARAMETER);

            /*
             * Perform the requested operation.
             */
            while (cb > 0)
            {
                unsigned iPT  = off >> X86_PD_SHIFT;
                unsigned iPTE = (off >> PAGE_SHIFT) & X86_PT_MASK;
                while (cb > 0 && iPTE < RT_ELEMENTS(pCur->aPTs[iPT].CTX_SUFF(pPT)->a))
                {
                    /* 32-Bit */
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u &= fMask | X86_PTE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPTE].u |= fFlags & ~X86_PTE_PG_MASK;

                    /* PAE */
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u &= fMask | X86_PTE_PAE_PG_MASK;
                    pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPTE / 512].a[iPTE % 512].u |= fFlags & ~X86_PTE_PAE_PG_MASK;

                    /* invalidate the TLB entry */
                    PGM_INVL_PG((RTGCUINTPTR)pCur->GCPtr + off);

                    /* next */
                    iPTE++;
                    cb  -= PAGE_SIZE;
                    off += PAGE_SIZE;
                }
            }

            return VINF_SUCCESS;
        }
        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("Page range %#x LB%#x not found\n", GCPtr, cb));
    return VERR_INVALID_PARAMETER;
}
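
/*
 * Usage sketch (placeholder address, not a real call site): to write-protect a
 * single page of a mapping, clear X86_PTE_RW through the AND mask while leaving
 * the other flags alone; PGMMapSetPage replaces the flags outright since it
 * passes an all-zero AND mask.
 *
 *      // Clear R/W, keep everything else (the physical address bits are always preserved).
 *      int rc = PGMMapModifyPage(pVM, GCPtrPage, PAGE_SIZE, 0, ~(uint64_t)X86_PTE_RW);
 *      AssertRCReturn(rc, rc);
 *
 *      // Make the page present and writable again, dropping any other flags.
 *      rc = PGMMapSetPage(pVM, GCPtrPage, PAGE_SIZE, X86_PTE_P | X86_PTE_RW);
 *      AssertRCReturn(rc, rc);
 */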


#ifndef IN_RING0
/**
 * Sets all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pMap        Pointer to the mapping in question.
 * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
{
    Log(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3))
        return;    /* too early */
#endif

    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
    Assert(enmShadowMode <= PGMMODE_PAE_NX);

    /*
     * Init the page tables and insert them into the page directories.
     */
    unsigned i = pMap->cPTs;
    iNewPDE += i;
    while (i-- > 0)
    {
        iNewPDE--;

        switch(enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
                AssertFatal(pShw32BitPd);

                if (pShw32BitPd->a[iNewPDE].n.u1Present)
                {
                    Assert(!(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iNewPDE);
                }

                X86PDE Pde;
                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
                Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
                pShw32BitPd->a[iNewPDE] = Pde;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT  pShwPdpt;
                PX86PDPAE pShwPaePd;
                const unsigned iPdPt = iNewPDE / 256;
                unsigned iPDE = iNewPDE * 2 % 512;
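                /* Each 32-bit PDE covers 4 MB and therefore corresponds to two 2 MB PAE
                   PDEs, and 256 32-bit PDEs (1 GB) fall under a single PDPT entry.  Worked
                   example with a hypothetical index: iNewPDE = 0x301 (GC 0xC0400000) gives
                   iPdPt = 0x301 / 256 = 3 and iPDE = 0x301 * 2 % 512 = 2, so PAE PDEs 2 and
                   3 of the fourth page directory receive the mapping. */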

                pShwPdpt  = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
                Assert(pShwPdpt);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
                if (!pShwPaePd)
                {
                    X86PDPE GstPdpe;

                    if (PGMGetGuestMode(pVM) < PGMMODE_PAE)
                    {
                        /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
                        GstPdpe.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
                    }
                    else
                    {
                        PX86PDPE pGstPdpe;
                        pGstPdpe = pgmGstGetPaePDPEPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
                        if (pGstPdpe)
                            GstPdpe = *pGstPdpe;
                        else
                            GstPdpe.u = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
                    }
                    int rc = pgmShwSyncPaePDPtr(pVM, (iPdPt << X86_PDPT_SHIFT), &GstPdpe, &pShwPaePd);
                    AssertFatal(RT_SUCCESS(rc));
                    if (rc != VINF_SUCCESS)
                    {
                        rc = pgmShwSyncPaePDPtr(pVM, (iPdPt << X86_PDPT_SHIFT), &GstPdpe, &pShwPaePd);
                        AssertFatalMsg(rc == VINF_SUCCESS, ("rc = %Rrc\n", rc));
                    }
                }
#endif
                AssertFatal(pShwPaePd);

                PPGMPOOLPAGE pPoolPagePde = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
                AssertFatal(pPoolPagePde);

                if (pShwPaePd->a[iPDE].n.u1Present)
                {
                    Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
                }

                X86PDEPAE PdePae0;
                PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
                pShwPaePd->a[iPDE] = PdePae0;

                /* 2nd 2 MB PDE of the 4 MB region */
                iPDE++;
                AssertFatal(iPDE < 512);

                if (pShwPaePd->a[iPDE].n.u1Present)
                {
                    Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
                }

                X86PDEPAE PdePae1;
                PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
                pShwPaePd->a[iPDE] = PdePae1;

                /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
                pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}


/**
 * Clears all PDEs involved with the mapping in the shadow page table.
 *
 * @param   pVM         The VM handle.
 * @param   pShwPageCR3 CR3 root page
 * @param   pMap        Pointer to the mapping in question.
 * @param   iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
 */
void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE)
{
    Log(("pgmMapClearShadowPDEs old pde %x (mappings enabled %d)\n", iOldPDE, pgmMapAreMappingsEnabled(&pVM->pgm.s)));

    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
        return;

#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    Assert(pShwPageCR3);
#endif

    unsigned i = pMap->cPTs;
    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
    iOldPDE += i;
    while (i-- > 0)
    {
        iOldPDE--;

        switch(enmShadowMode)
        {
            case PGMMODE_32_BIT:
            {
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
#else
                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
#endif
                AssertFatal(pShw32BitPd);

                pShw32BitPd->a[iOldPDE].u = 0;
                break;
            }

            case PGMMODE_PAE:
            case PGMMODE_PAE_NX:
            {
                PX86PDPT  pPdpt = NULL;
                PX86PDPAE pShwPaePd = NULL;
                const unsigned iPD = iOldPDE / 256;     /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
                unsigned iPDE = iOldPDE * 2 % 512;
#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
                pPdpt     = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pPdpt, (iPD << X86_PDPT_SHIFT));
#else
                pPdpt     = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPD << X86_PDPT_SHIFT));
#endif
                AssertFatal(pShwPaePd);

                pShwPaePd->a[iPDE].u = 0;

                iPDE++;
                AssertFatal(iPDE < 512);

                pShwPaePd->a[iPDE].u = 0;

                /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
                pPdpt->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
                break;
            }

            default:
                AssertFailed();
                break;
        }
    }
}
#endif /* !IN_RING0 */


/**
 * Apply the hypervisor mappings to the active CR3.
 *
 * @returns VBox status.
 * @param   pVM     The virtual machine.
 */
VMMDECL(int) PGMMapActivateAll(PVM pVM)
{
    Log(("PGMMapActivateAll fixed mappings=%d\n", pVM->pgm.s.fMappingsFixed));

    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

#ifdef IN_RING0
    AssertFailed();
    return VERR_INTERNAL_ERROR;
#else
# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapSetShadowPDEs(pVM, pCur, iPDE);
    }
    return VINF_SUCCESS;
#endif /* IN_RING0 */
}


/**
 * Remove the hypervisor mappings from the active CR3
 *
 * @returns VBox status.
 * @param   pVM     The virtual machine.
 */
VMMDECL(int) PGMMapDeactivateAll(PVM pVM)
{
    Log(("PGMMapDeactivateAll fixed mappings=%d\n", pVM->pgm.s.fMappingsFixed));

    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

#ifdef IN_RING0
    AssertFailed();
    return VERR_INTERNAL_ERROR;
#else
# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapClearShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
    }
    return VINF_SUCCESS;
#endif /* IN_RING0 */
}


/**
 * Remove the hypervisor mappings from the specified CR3
 *
 * @returns VBox status.
 * @param   pVM         The virtual machine.
 * @param   pShwPageCR3 CR3 root page
 */
int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

#ifdef IN_RING0
    AssertFailed();
    return VERR_INTERNAL_ERROR;
#else
# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
# endif

    /*
     * Iterate mappings.
     */
    for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
    {
        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
        pgmMapClearShadowPDEs(pVM, pShwPageCR3, pCur, iPDE);
    }
    return VINF_SUCCESS;
#endif /* IN_RING0 */
}


#ifndef IN_RING0
/**
 * Checks guest PD for conflicts with VMM GC mappings.
 *
 * @returns true if conflict detected.
 * @returns false if not.
 * @param   pVM     The virtual machine.
 */
VMMDECL(bool) PGMMapHasConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return false;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT = pCur->cPTs;
            while (iPT-- > 0)
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
#endif
                    return true;
                }
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR   GCPtr = pCur->GCPtr;
            unsigned  iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
#endif
                    return true;
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
        }
    }
    else
        AssertFailed();

    return false;
}
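
/*
 * Usage sketch (hypothetical call site): a caller that detects a conflict but
 * cannot relocate the offending mapping in its current context can hand the
 * problem back by requesting a full CR3 resync, mirroring what
 * PGMMapResolveConflicts below does when not built for ring-3:
 *
 *      if (PGMMapHasConflicts(pVM))
 *          return VINF_PGM_SYNC_CR3;
 */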

# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
/**
 * Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings.
 *
 * @returns VBox status.
 * @param   pVM     The virtual machine.
 */
VMMDECL(int) PGMMapResolveConflicts(PVM pVM)
{
    /*
     * Can skip this if mappings are safely fixed.
     */
    if (pVM->pgm.s.fMappingsFixed)
        return VINF_SUCCESS;

    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    Assert(enmGuestMode <= PGMMODE_PAE_NX);

    /*
     * Iterate mappings.
     */
    if (enmGuestMode == PGMMODE_32_BIT)
    {
        /*
         * Resolve the page directory.
         */
        PX86PD pPD = pgmGstGet32bitPDPtr(&pVM->pgm.s);
        Assert(pPD);

        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
            unsigned iPT = pCur->cPTs;
            while (iPT-- > 0)
            {
                if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
                    &&  (pVM->fRawR0Enabled || pPD->a[iPDE + iPT].n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));

                    int rc = pgmR3SyncPTResolveConflict(pVM, pCur, pPD, iPDE << X86_PD_SHIFT);
                    AssertRCReturn(rc, rc);

                    /*
                     * Update pCur.
                     */
                    pCur = pVM->pgm.s.CTX_SUFF(pMappings);
                    while (pCur && pCur->GCPtr < (iPDE << X86_PD_SHIFT))
                        pCur = pCur->CTX_SUFF(pNext);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
                         (iPT + iPDE) << X86_PD_SHIFT,
                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
            }
            if (!pCur)
                break;
        }
    }
    else if (   enmGuestMode == PGMMODE_PAE
             || enmGuestMode == PGMMODE_PAE_NX)
    {
        for (PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings); pCur; pCur = pCur->CTX_SUFF(pNext))
        {
            RTGCPTR   GCPtr = pCur->GCPtr;
            unsigned  iPT = pCur->cb >> X86_PD_PAE_SHIFT;
            while (iPT-- > 0)
            {
                X86PDEPAE Pde = pgmGstGetPaePDE(&pVM->pgm.s, GCPtr);
                if (   Pde.n.u1Present
                    && (pVM->fRawR0Enabled || Pde.n.u1User))
                {
                    STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
#ifdef IN_RING3
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, pCur->pszDesc, Pde.u));

                    int rc = pgmR3SyncPTResolveConflictPAE(pVM, pCur, GCPtr);
                    AssertRCReturn(rc, rc);

                    /*
                     * Update pCur.
                     */
                    pCur = pVM->pgm.s.CTX_SUFF(pMappings);
                    while (pCur && pCur->GCPtr < GCPtr)
                        pCur = pCur->CTX_SUFF(pNext);
                    break;
#else
                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
                         "                        PDE=%016RX64.\n",
                         GCPtr, Pde.u));
                    return VINF_PGM_SYNC_CR3;
#endif
                }
                GCPtr += (1 << X86_PD_PAE_SHIFT);
            }
            if (!pCur)
                break;
        }
    }
    else
        AssertFailed();

    return VINF_SUCCESS;
}
# endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
#endif /* !IN_RING0 */