/* $Id$ */
/** @file
 * IPRT - Ring-0 Memory Objects, NT.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-nt-kernel.h"

#include <iprt/memobj.h>
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include <iprt/process.h>
#include "internal/memobj.h"

/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Maximum number of bytes we try to lock down in one go.
 * This is supposed to have a limit right below 256MB, but this appears
 * to actually be much lower. The values here have been determined experimentally.
 */
#ifdef RT_ARCH_X86
# define MAX_LOCK_MEM_SIZE   (32*1024*1024) /* 32MB */
#endif
#ifdef RT_ARCH_AMD64
# define MAX_LOCK_MEM_SIZE   (24*1024*1024) /* 24MB */
#endif


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * The NT version of the memory object structure.
 *
 * Extends the generic RTR0MEMOBJINTERNAL core with the NT specific state:
 * the MDL(s) describing the backing pages and bookkeeping for how they
 * were obtained (so rtR0MemObjNativeFree knows how to release them).
 */
typedef struct RTR0MEMOBJNT
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
#ifndef IPRT_TARGET_NT4
    /** Set if the pages were obtained via MmAllocatePagesForMdl() and therefore
     * must be released with MmFreePagesFromMdl() + ExFreePool(). */
    bool                fAllocatedPagesForMdl;
#endif
    /** Pointer returned by MmSecureVirtualMemory, NULL if the memory was not
     * secured against user-mode changes. */
    PVOID               pvSecureMem;
    /** The number of PMDLs (memory descriptor lists) in the array. */
    uint32_t            cMdls;
    /** Array of MDL pointers. (variable size) */
    PMDL                apMdls[1];
} RTR0MEMOBJNT, *PRTR0MEMOBJNT;


/**
 * Frees the native (NT) resources of a memory object.
 *
 * @returns IPRT status code.
 * @param   pMem    The memory object whose native resources are to be released.
 */
int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    /*
     * Deal with it on a per type basis (just as a variation).
     */
    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                /* Unmap first, then release any secure-memory handle, then free the pages. */
                Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
                pMemNt->Core.pv = NULL;
                if (pMemNt->pvSecureMem)
                {
                    MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                    pMemNt->pvSecureMem = NULL;
                }

                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]); /* MmAllocatePagesForMdl MDLs are pool allocations. */
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            /* LOW objects are only created via MmAllocatePagesForMdl on NT5+. */
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_PAGE:
            /* Pool allocation + an MDL built over it. */
            Assert(pMemNt->Core.pv);
            ExFreePool(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_CONT:
            /* Contiguous allocation + an MDL built over it. */
            Assert(pMemNt->Core.pv);
            MmFreeContiguousMemory(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            /* NOTE(review): PHYS objects from rtR0MemObjNativeEnterPhys carry no
               MDL (cMdls == 0) and nothing to free; they end up here and assert.
               TODO confirm whether that path is ever taken. */
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_LOCK:
            /* Release the secure-memory handle (user locks only) before unlocking. */
            if (pMemNt->pvSecureMem)
            {
                MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                pMemNt->pvSecureMem = NULL;
            }
            for (uint32_t i = 0; i < pMemNt->cMdls; i++)
            {
                MmUnlockPages(pMemNt->apMdls[i]);
                IoFreeMdl(pMemNt->apMdls[i]);
                pMemNt->apMdls[i] = NULL;
            }
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
            /* Reservation is not implemented on NT yet (see rtR0MemObjNativeReserve*). */
            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
            return VERR_INTERNAL_ERROR;
            break;

        case RTR0MEMOBJTYPE_MAPPING:
        {
            Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
            PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
            Assert(pMemNtParent);
            if (pMemNtParent->cMdls)
            {
                /* Mapped from the parent's (single) MDL. */
                Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
                Assert(     pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
                        ||  pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
            }
            else
            {
                /* No MDL on the parent: it must be an MMIO-style PHYS object
                   mapped with MmMapIoSpace (kernel only). */
                Assert(   pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
                       && !pMemNtParent->Core.u.Phys.fAllocated);
                Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
                MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
            }
            pMemNt->Core.pv = NULL;
            break;
        }

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}
/**
 * Allocates page aligned virtual kernel memory (non-paged pool).
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the ring-0 memory object handle.
 * @param   cb              The number of bytes to allocate (page aligned, max 1GB).
 * @param   fExecutable     Whether it must be possible to execute code in the
 *                          memory.  NOTE(review): only honoured on AMD64, and
 *                          there the mapping is made executable unconditionally
 *                          regardless of this flag — TODO confirm intended.
 */
int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try allocate the memory and create an MDL for them so
     * we can query the physical addresses and do mappings later
     * without running into out-of-memory conditions and similar problems.
     */
    int rc = VERR_NO_PAGE_MEMORY;
    void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
    if (pv)
    {
        PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
        if (pMdl)
        {
            MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
            MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
            if (pMemNt)
            {
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            IoFreeMdl(pMdl);
        }
        ExFreePool(pv);
    }
    return rc;
}
/**
 * Allocates page aligned kernel memory with all backing pages below 4GB.
 *
 * Strategy: (1) try the plain page allocator and check that all pages landed
 * below 4GB; (2) on NT5+ use MmAllocatePagesForMdl with an explicit high
 * address limit; (3) fall back on a contiguous allocation.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the ring-0 memory object handle.
 * @param   cb              The number of bytes to allocate (page aligned, max 1GB).
 * @param   fExecutable     Whether it must be possible to execute code in the memory.
 */
int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try see if we get lucky first...
     * (We could probably just assume we're lucky on NT4.)
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        /* Verify every page is below 4GB; bail out on the first that is not. */
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
            {
                rc = VERR_NO_MEMORY;
                break;
            }
        if (RT_SUCCESS(rc))
            return rc;

        /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
        RTR0MemObjFree(*ppMem, false);
        *ppMem = NULL;
    }

#ifndef IPRT_TARGET_NT4
    /*
     * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
     */
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = _4G - 1;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        /* MmAllocatePagesForMdl may return fewer pages than requested. */
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            __try
            {
                void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                        FALSE /* no bug check on failure */, NormalPagePriority);
                if (pv)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
                    if (pMemNt)
                    {
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                    MmUnmapLockedPages(pv, pMdl);
                }
            }
            __except(EXCEPTION_EXECUTE_HANDLER)
            {
                /* MmMapLockedPagesSpecifyCache can raise despite the no-bug-check flag. */
                NTSTATUS rcNt = GetExceptionCode();
                Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
                /* nothing */
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
#endif /* !IPRT_TARGET_NT4 */

    /*
     * Fall back on contiguous memory...
     */
    return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
}
/**
 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
 * to what rtR0MemObjNativeAllocCont() does.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the pointer to the ring-0 memory object.
 * @param   cb              The size (page aligned, max 1GB).
 * @param   fExecutable     Whether the mapping should be executable or not.
 *                          NOTE(review): only acted upon on AMD64, and there
 *                          unconditionally — TODO confirm intended.
 * @param   PhysHighest     The highest physical address for the pages in allocation.
 */
static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Allocate the memory and create an MDL for it.
     */
    PHYSICAL_ADDRESS PhysAddrHighest;
    PhysAddrHighest.QuadPart = PhysHighest;
    void *pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
    if (!pv)
        return VERR_NO_MEMORY;

    PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
    if (pMdl)
    {
        MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
        MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

        PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
        if (pMemNt)
        {
            /* Physical base = PFN of the first page shifted up to a byte address. */
            pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
            pMemNt->cMdls = 1;
            pMemNt->apMdls[0] = pMdl;
            *ppMem = &pMemNt->Core;
            return VINF_SUCCESS;
        }

        IoFreeMdl(pMdl);
    }
    MmFreeContiguousMemory(pv);
    return VERR_NO_MEMORY;
}
/**
 * Allocates physically contiguous, page aligned kernel memory below 4GB.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the ring-0 memory object handle.
 * @param   cb              The number of bytes to allocate.
 * @param   fExecutable     Whether it must be possible to execute code in the memory.
 */
int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /* Plain contiguous allocations are constrained to the low 4GB. */
    const RTHCPHYS PhysHighest = _4G - 1;
    return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, PhysHighest);
}
/**
 * Allocates contiguous physical memory, not necessarily mapped.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the ring-0 memory object handle.
 * @param   cb              The number of bytes to allocate.
 * @param   PhysHighest     The highest permitted physical address for the pages,
 *                          NIL_RTHCPHYS for no restriction.
 */
int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#ifndef IPRT_TARGET_NT4
    /*
     * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
     *
     * This is preferable to using MmAllocateContiguousMemory because there are
     * a few situations where the memory shouldn't be mapped, like for instance
     * VT-x control memory. Since these are rather small allocations (one or
     * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
     * request.
     *
     * If the allocation is big, the chances are *probably* not very good. The
     * current limit is kind of random...
     */
    if (cb < _128K)
    {
        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;
        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
        PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
        if (pMdl)
        {
            if (MmGetMdlByteCount(pMdl) >= cb)
            {
                /* Scan the PFN array; the chunk is only usable if all page
                   frame numbers are consecutive (i.e. physically contiguous). */
                PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
                PFN_NUMBER Pfn = paPfns[0] + 1;
                const size_t cPages = cb >> PAGE_SHIFT;
                size_t iPage;
                for (iPage = 1; iPage < cPages; iPage++, Pfn++)
                    if (paPfns[iPage] != Pfn)
                        break;
                if (iPage >= cPages)
                {
                    /* Note: pv is NULL — the object is intentionally unmapped. */
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
                    if (pMemNt)
                    {
                        pMemNt->Core.u.Phys.fAllocated = true;
                        pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                }
            }
            MmFreePagesFromMdl(pMdl);
            ExFreePool(pMdl);
        }
    }
#endif /* !IPRT_TARGET_NT4 */

    /* Fall back on the always-mapped contiguous allocator. */
    return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest);
}
/**
 * Allocates non-contiguous physical memory without mapping it.
 *
 * @returns IPRT status code.  VERR_NOT_SUPPORTED on NT4, where
 *          MmAllocatePagesForMdl is not available.
 * @param   ppMem           Where to store the ring-0 memory object handle.
 * @param   cb              The number of bytes to allocate.
 * @param   PhysHighest     The highest permitted physical address for the pages,
 *                          NIL_RTHCPHYS for no restriction.
 */
int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#ifndef IPRT_TARGET_NT4
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        /* MmAllocatePagesForMdl may return fewer pages than requested. */
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            /* pv is NULL — the object is intentionally left unmapped. */
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
            if (pMemNt)
            {
                pMemNt->fAllocatedPagesForMdl = true;
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
    return VERR_NO_MEMORY;
#else  /* IPRT_TARGET_NT4 */
    return VERR_NOT_SUPPORTED;
#endif /* IPRT_TARGET_NT4 */
}
/**
 * Creates a memory object describing an existing physical address range
 * (typically MMIO) without allocating or mapping anything.
 *
 * @returns IPRT status code.
 * @param   ppMem   Where to store the ring-0 memory object handle.
 * @param   Phys    The physical base address of the range.
 * @param   cb      The size of the range.
 */
int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
{
    /*
     * Reject addresses that don't survive the PFN_NUMBER round-trip,
     * i.e. those the platform's MDL machinery cannot describe.
     */
    PFN_NUMBER PfnBase = (PFN_NUMBER)(Phys >> PAGE_SHIFT);
    if (((RTHCPHYS)PfnBase << PAGE_SHIFT) != Phys)
        return VERR_ADDRESS_TOO_BIG;

    /*
     * Create the IPRT memory object. No mapping (pv = NULL), no MDLs, and
     * fAllocated = false so rtR0MemObjNativeFree won't try to free the pages.
     */
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemNt)
        return VERR_NO_MEMORY;

    pMemNt->Core.u.Phys.PhysBase   = Phys;
    pMemNt->Core.u.Phys.fAllocated = false;
    *ppMem = &pMemNt->Core;
    return VINF_SUCCESS;
}
/**
 * Internal worker for locking down pages.
 *
 * @return IPRT status code.
 *
 * @param   ppMem       Where to store the memory object pointer.
 * @param   pv          First page.
 * @param   cb          Number of bytes.
 * @param   R0Process   The process \a pv and \a cb refers to, NIL_RTR0PROCESS
 *                      for kernel memory.
 */
static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR0PROCESS R0Process)
{
    /*
     * Calc the number of MDLs we need and allocate the memory object structure.
     */
    size_t cMdls = cb / MAX_LOCK_MEM_SIZE;
    if (cb % MAX_LOCK_MEM_SIZE)
        cMdls++;
    if (cMdls >= UINT32_MAX)
        return VERR_OUT_OF_RANGE;
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]),
                                                        RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemNt)
        return VERR_NO_MEMORY;

    /*
     * Secure the whole user range exactly ONCE, up front, so the user process
     * can't change the allocation while we hold it.
     *
     * Fix: this used to be done inside the per-MDL loop below with the full
     * (pv, cb) range each iteration, overwriting pMemNt->pvSecureMem every
     * time and thereby leaking a secure handle per extra MDL whenever
     * cb > MAX_LOCK_MEM_SIZE.
     */
    if (R0Process != NIL_RTR0PROCESS)
    {
        pMemNt->pvSecureMem = MmSecureVirtualMemory(pv, cb, PAGE_READWRITE);
        if (!pMemNt->pvSecureMem)
        {
            rtR0MemObjDelete(&pMemNt->Core);
            return VERR_NO_MEMORY;
        }
    }

    /*
     * Loop locking down the sub parts of the memory.
     */
    int rc = VINF_SUCCESS;
    size_t cbTotal = 0;
    uint8_t *pb = (uint8_t *)pv;
    uint32_t iMdl;
    for (iMdl = 0; iMdl < cMdls; iMdl++)
    {
        /*
         * Calc the Mdl size and allocate it.
         */
        size_t cbCur = cb - cbTotal;
        if (cbCur > MAX_LOCK_MEM_SIZE)
            cbCur = MAX_LOCK_MEM_SIZE;
        AssertMsg(cbCur, ("cbCur: 0!\n"));
        PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
        if (!pMdl)
        {
            rc = VERR_NO_MEMORY;
            break;
        }

        /*
         * Lock the pages. MmProbeAndLockPages raises on failure, hence the SEH.
         */
        __try
        {
            MmProbeAndLockPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode, IoModifyAccess);
            pMemNt->apMdls[iMdl] = pMdl;
            pMemNt->cMdls++;
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
            IoFreeMdl(pMdl);
            rc = VERR_LOCK_FAILED;
            break;
        }

        /* next */
        cbTotal += cbCur;
        pb += cbCur;
    }
    if (RT_SUCCESS(rc))
    {
        Assert(pMemNt->cMdls == cMdls);
        pMemNt->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemNt->Core;
        return rc;
    }

    /*
     * We failed, perform cleanups.
     */
    while (iMdl-- > 0)
    {
        MmUnlockPages(pMemNt->apMdls[iMdl]);
        IoFreeMdl(pMemNt->apMdls[iMdl]);
        pMemNt->apMdls[iMdl] = NULL;
    }
    if (pMemNt->pvSecureMem)
    {
        MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
        pMemNt->pvSecureMem = NULL;
    }
    rtR0MemObjDelete(&pMemNt->Core);
    return rc;
}
/**
 * Locks down pages in a user process.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the ring-0 memory object handle.
 * @param   R3Ptr       The ring-3 address of the first page.
 * @param   cb          The number of bytes to lock.
 * @param   R0Process   The process the range belongs to; must be the caller's
 *                      own process for now.
 */
int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
{
    /* Only the current process is supported.
       (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);

    return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, R0Process);
}
/**
 * Locks down kernel pages.
 *
 * @returns IPRT status code.
 * @param   ppMem   Where to store the ring-0 memory object handle.
 * @param   pv      The kernel address of the first page.
 * @param   cb      The number of bytes to lock.
 */
int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
{
    /* Kernel locking is just the common worker with no owning process. */
    return rtR0MemObjNtLock(ppMem, pv, cb, NIL_RTR0PROCESS);
}
/**
 * Reserves kernel virtual address space — not implemented on NT.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param   ppMem       Where the ring-0 memory object handle would be stored.
 * @param   pvFixed     Requested fixed address, (void *)-1 for any.
 * @param   cb          The number of bytes to reserve.
 * @param   uAlignment  The alignment of the reserved range.
 */
int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    /*
     * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
     */
    return VERR_NOT_IMPLEMENTED;
}
/**
 * Reserves user virtual address space — not implemented on NT.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 * @param   ppMem       Where the ring-0 memory object handle would be stored.
 * @param   R3PtrFixed  Requested fixed ring-3 address, (RTR3PTR)-1 for any.
 * @param   cb          The number of bytes to reserve.
 * @param   uAlignment  The alignment of the reserved range.
 * @param   R0Process   The process to reserve the address space in.
 */
int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    /*
     * ZeCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
     */
    return VERR_NOT_IMPLEMENTED;
}
/**
 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the memory object for the mapping.
 * @param   pMemToMap   The memory object to map.
 * @param   pvFixed     Where to map it. (void *)-1 if anywhere is fine.
 * @param   uAlignment  The alignment requirement for the mapping.
 *                      Currently ignored (see todos below).
 * @param   fProt       The desired page protection for the mapping.
 *                      Currently ignored (see todos below).
 * @param   R0Process   If NIL_RTR0PROCESS map into system (kernel) memory.
 *                      If not nil, it's the current process.
 */
static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                           unsigned fProt, RTR0PROCESS R0Process)
{
    int rc = VERR_MAP_FAILED;

    /*
     * There are two basic cases here, either we've got an MDL and can
     * map it using MmMapLockedPages, or we've got a contiguous physical
     * range (MMIO most likely) and can use MmMapIoSpace.
     */
    PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
    if (pMemNtToMap->cMdls)
    {
        /* don't attempt map locked regions with more than one mdl. */
        if (pMemNtToMap->cMdls != 1)
            return VERR_NOT_SUPPORTED;

        /* we can't map anything to the first page, sorry. */
        if (pvFixed == 0)
            return VERR_NOT_SUPPORTED;

        /* only one system mapping for now - no time to figure out MDL restrictions right now. */
        if (    pMemNtToMap->Core.uRel.Parent.cMappings
            &&  R0Process == NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        __try
        {
            /** @todo uAlignment */
            /** @todo How to set the protection on the pages? */
            void *pv = MmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
                                                    R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
                                                    MmCached,
                                                    pvFixed != (void *)-1 ? pvFixed : NULL,
                                                    FALSE /* no bug check on failure */,
                                                    NormalPagePriority);
            if (pv)
            {
                NOREF(fProt);

                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                                                                    pMemNtToMap->Core.cb);
                if (pMemNt)
                {
                    pMemNt->Core.u.Mapping.R0Process = R0Process;
                    *ppMem = &pMemNt->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
                MmUnmapLockedPages(pv, pMemNtToMap->apMdls[0]);
            }
        }
        __except(EXCEPTION_EXECUTE_HANDLER)
        {
            /* MmMapLockedPagesSpecifyCache can raise despite the no-bug-check flag. */
            NTSTATUS rcNt = GetExceptionCode();
            Log(("rtR0MemObjNtMap: Exception Code %#x\n", rcNt));
            /* nothing */
            rc = VERR_MAP_FAILED;
        }
    }
    else
    {
        /* Only unmapped PHYS objects (MMIO ranges) can lack MDLs. */
        AssertReturn(   pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
                     && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);

        /* cannot map phys mem to user space (yet). */
        if (R0Process != NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        /** @todo uAlignment */
        /** @todo How to set the protection on the pages? */
        PHYSICAL_ADDRESS Phys;
        Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
        void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb, MmCached); /** @todo add cache type to fProt. */
        if (pv)
        {
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                                                                pMemNtToMap->Core.cb);
            if (pMemNt)
            {
                pMemNt->Core.u.Mapping.R0Process = R0Process;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
        }
    }

    NOREF(uAlignment); NOREF(fProt);
    return rc;
}
/**
 * Maps a memory object into kernel space.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the mapping memory object handle.
 * @param   pMemToMap   The memory object to map.
 * @param   pvFixed     Where to map it, (void *)-1 for anywhere.
 * @param   uAlignment  The alignment requirement for the mapping.
 * @param   fProt       The desired page protection for the mapping.
 * @param   offSub      Offset of a sub-range to map; must be 0 (unsupported).
 * @param   cbSub       Size of a sub-range to map; must be 0 (unsupported).
 */
int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
    /* Partial mappings aren't supported here; the whole object or nothing. */
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);

    return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
}
/**
 * Maps a memory object into the current user process.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the mapping memory object handle.
 * @param   pMemToMap   The memory object to map.
 * @param   R3PtrFixed  Where to map it in ring-3, (RTR3PTR)-1 for anywhere.
 * @param   uAlignment  The alignment requirement for the mapping.
 * @param   fProt       The desired page protection for the mapping.
 * @param   R0Process   The target process; must be the caller's own process.
 */
int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /* We can only map into the process we're currently running in. */
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);

    return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
}
/**
 * Gets the host physical address of a page in a memory object.
 *
 * @returns The physical address of the page, NIL_RTHCPHYS if unknown
 *          (reserved-virtual objects, or assert-failing types without MDLs).
 * @param   pMem    The memory object.
 * @param   iPage   The zero-based page index within the object.
 */
RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    if (pMemNt->cMdls)
    {
        /* Single-MDL objects: index straight into the PFN array. */
        if (pMemNt->cMdls == 1)
        {
            PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
            return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
        }

        /* Multi-MDL (large locked) objects: every MDL except the last covers
           exactly MAX_LOCK_MEM_SIZE bytes, so locate the MDL and intra-MDL
           page index arithmetically. */
        size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
        PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
        return (RTHCPHYS)paPfns[iMdlPfn] << PAGE_SHIFT;
    }

    /* No MDLs: resolve from the object type. */
    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_MAPPING:
            /* Delegate to the parent object that owns the pages. */
            return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_PHYS:
            /* Physically contiguous by construction. */
            return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_PHYS_NC:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_LOCK:
        default:
            /* These types always carry MDLs and were handled above. */
            AssertMsgFailed(("%d\n", pMemNt->Core.enmType));
            /* fallthrough - deliberate: assert then return NIL like RES_VIRT. */
        case RTR0MEMOBJTYPE_RES_VIRT:
            return NIL_RTHCPHYS;
    }
}