memobj-r0drv-nt.cpp revision 60680e89b1c6689e75730c2588af97b788f21dd0
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * innotek Portable Runtime - Ring-0 Memory Objects, NT.
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * Copyright (C) 2006-2007 innotek GmbH
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * This file is part of VirtualBox Open Source Edition (OSE), as
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * available from http://www.virtualbox.org. This file is free software;
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * you can redistribute it and/or modify it under the terms of the GNU
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * General Public License as published by the Free Software Foundation,
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * distribution. VirtualBox OSE is distributed in the hope that it will
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * be useful, but WITHOUT ANY WARRANTY of any kind.
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync/*******************************************************************************
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync* Header Files *
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync*******************************************************************************/
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync/*******************************************************************************
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync* Defined Constants And Macros *
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync*******************************************************************************/
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync/** Maximum number of bytes we try to lock down in one go.
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * This is supposed to have a limit right below 256MB, but this appears
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * to actually be much lower. The values here have been determined experimentally.
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync# define MAX_LOCK_MEM_SIZE (32*1024*1024) /* 32MB */
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync# define MAX_LOCK_MEM_SIZE (24*1024*1024) /* 24MB */
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync/*******************************************************************************
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync* Structures and Typedefs *
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync*******************************************************************************/
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * The NT version of the memory object structure.
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsynctypedef struct RTR0MEMOBJNT
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync /** The core structure. */
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync /** Used MmAllocatePagesForMdl(). */
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync /** The number of PMDLs (memory descriptor lists) in the array. */
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync /** Array of MDL pointers. (variable size) */
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * Deal with it on a per type basis (just as a variation).
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync/* if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync Assert( pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync || pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync Assert( pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsyncint rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync AssertMsgReturn(cb > _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * Try allocate the memory and create an MDL for them so
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * we can query the physical addresses and do mappings later
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * without running into out-of-memory conditions and similar problems.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * Create the IPRT memory object.
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsyncint rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * Try see if we get lucky first...
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * (We could probably just assume we're lucky on NT4.)
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync while (iPage-- > 0)
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync FALSE /* no bug check on failure */, NormalPagePriority);
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync /* nothing */
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync#endif /* !IPRT_TARGET_NT4 */
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * Fall back on contiguous memory...
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * to what rtR0MemObjNativeAllocCont() does.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * @returns IPRT status code.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * @param ppMem Where to store the pointer to the ring-0 memory object.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * @param cb The size.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * @param fExecutable Whether the mapping should be executable or not.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * @param PhysHighest The highest physical address for the pages in allocation.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsyncstatic int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest)
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * Allocate the memory and create an MDL for it.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync void *pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsyncint rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G-1);
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsyncint rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * This is preferable to using MmAllocateContiguousMemory because there are
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * a few situations where the memory shouldn't be mapped, like for instance
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * VT-x control memory. Since these are rather small allocations (one or
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * If the allocation is big, the chances are *probably* not very good. The
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * current limit is kind of random...
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync#endif /* !IPRT_TARGET_NT4 */
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync return rtR0MemObjNativeAllocContEx(ppMem, cb, false, PhysHighest);
c5d2523548cc57504b829f53f1362b848a84542cvboxsyncint rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync#else /* IPRT_TARGET_NT4 */
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync#endif /* IPRT_TARGET_NT4 */
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsyncint rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * Validate the address range and create a descriptor for it.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * Create the IPRT memory object.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * Internal worker for locking down pages.
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * @return IPRT status code.
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * @param ppMem Where to store the memory object pointer.
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * @param pv First page.
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * @param cb Number of bytes.
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsync * @param   R0Process   The process \a pv and \a cb refer to; NIL_RTR0PROCESS for the kernel.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsyncstatic int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR0PROCESS R0Process)
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * Calc the number of MDLs we need and allocate the memory object structure.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJNT, apMdls[cMdls]),
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * Loop locking down the sub parts of the memory.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * Calc the Mdl size and allocate it.
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync PMDL pMdl = IoAllocateMdl(pb, (ULONG)cbCur, FALSE, FALSE, NULL);
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * Lock the pages.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync MmProbeAndLockPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode, IoModifyAccess);
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync * We failed, perform cleanups.
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync while (iMdl-- > 0)
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsyncint rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
e5f6ad85110ebdc7256808a0a3a8ce330370be6avboxsync return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, R0Process);
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsyncint rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync return rtR0MemObjNtLock(ppMem, pv, cb, NIL_RTR0PROCESS);
e5bfc5c34142a7550be3564a8e01a037b1db5b31vboxsyncint rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
60680e89b1c6689e75730c2588af97b788f21dd0vboxsync * MmCreateSection(SEC_RESERVE) + MmMapViewInSystemSpace perhaps?
c5d2523548cc57504b829f53f1362b848a84542cvboxsyncint rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
60680e89b1c6689e75730c2588af97b788f21dd0vboxsync * ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * @returns IPRT status code.
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * @param ppMem Where to store the memory object for the mapping.
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * @param pMemToMap The memory object to map.
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * @param pvFixed Where to map it. (void *)-1 if anywhere is fine.
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * @param uAlignment The alignment requirement for the mapping.
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * @param fProt The desired page protection for the mapping.
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * @param R0Process If NIL_RTR0PROCESS map into system (kernel) memory.
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * If not nil, it's the current process.
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsyncstatic int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * There are two basic cases here, either we've got an MDL and can
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * map it using MmMapLockedPages, or we've got a contiguous physical
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync * range (MMIO most likely) and can use MmMapIoSpace.
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync /* don't attempt map locked regions with more than one mdl. */
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync /* we can't map anything to the first page, sorry. */
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync /* only one system mapping for now - no time to figure out MDL restrictions right now. */
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync /** @todo uAlignment */
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync /** @todo How to set the protection on the pages? */
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync void *pv = MmMapLockedPagesSpecifyCache(pMemNtToMap->apMdls[0],
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync /* nothing */
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync AssertReturn( pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync /* cannot map phys mem to user space (yet). */
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync /** @todo uAlignment */
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync /** @todo How to set the protection on the pages? */
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb, MmCached); /** @todo add cache type to fProt. */
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsyncint rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsyncint rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsyncRTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync size_t iMdl = iPage / (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync size_t iMdlPfn = iPage % (MAX_LOCK_MEM_SIZE >> PAGE_SHIFT);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[iMdl]);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);
d534750e38f9bab40fd4ab06d176fea25ec91c44vboxsync return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);