/* memobj-r0drv-nt.cpp revision 1c94c0a63ba68be1a7b2c640e70d7a06464e4fca */
/* $Id$ */
/** @file
* innotek Portable Runtime - Ring-0 Memory Objects, NT.
*/
/*
* Copyright (C) 2006-2007 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#include "the-nt-kernel.h"
/*******************************************************************************
* Defined Constants And Macros *
*******************************************************************************/
/** Maximum number of bytes we try to lock down in one go.
* This is supposed to have a limit right below 256MB, but this appears
* to actually be much lower. The values here have been determined experimentally.
*/
#ifdef RT_ARCH_X86
#endif
#ifdef RT_ARCH_AMD64
#endif
/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
/**
 * The NT version of the memory object structure.
 *
 * NOTE(review): this chunk looks truncated -- the core member, the secure
 * memory pointer, the MDL count and the MDL array declarations referenced
 * by the member comments below are missing, and the struct is never
 * closed within the visible span. Confirm against the full file.
 */
typedef struct RTR0MEMOBJNT
{
/** The core structure. */
#ifndef IPRT_TARGET_NT4
/** Used MmAllocatePagesForMdl(). */
bool fAllocatedPagesForMdl;
#endif
/** Pointer returned by MmSecureVirtualMemory */
/** The number of PMDLs (memory descriptor lists) in the array. */
/** Array of MDL pointers. (variable size) */
{
/* NOTE(review): truncated fragment -- by the case labels this is the body of
 * rtR0MemObjNativeFree(); the function signature, the switch statement
 * itself and the actual MmFreePagesFromMdl/MmUnsecureVirtualMemory/
 * IoFreeMdl cleanup calls appear to be missing from this chunk. */
/*
* Deal with it on a per type basis (just as a variation).
*/
{
case RTR0MEMOBJTYPE_LOW:
#ifndef IPRT_TARGET_NT4
/* LOW objects allocated via MmAllocatePagesForMdl() are freed through the MDL. */
if (pMemNt->fAllocatedPagesForMdl)
{
if (pMemNt->pvSecureMem)
{
}
break;
}
#endif
/* Reaching here means a LOW object with no known allocation origin. */
AssertFailed();
break;
case RTR0MEMOBJTYPE_PAGE:
break;
case RTR0MEMOBJTYPE_CONT:
break;
case RTR0MEMOBJTYPE_PHYS:
case RTR0MEMOBJTYPE_PHYS_NC:
#ifndef IPRT_TARGET_NT4
if (pMemNt->fAllocatedPagesForMdl)
{
break;
}
#endif
AssertFailed();
break;
case RTR0MEMOBJTYPE_LOCK:
/* Drop the secure-memory protection (if any) before unlocking the pages. */
if (pMemNt->pvSecureMem)
{
}
{
}
break;
case RTR0MEMOBJTYPE_RES_VIRT:
/* if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
{
}
else
{
}*/
/* Reserved virtual ranges are not implemented (see the reserve APIs below). */
AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
return VERR_INTERNAL_ERROR;
break;
case RTR0MEMOBJTYPE_MAPPING:
{
/* Mappings are undone differently depending on whether the parent has MDLs. */
if (pMemNtParent->cMdls)
{
}
else
{
}
break;
}
default:
/* Unknown object type -- should never happen. */
return VERR_INTERNAL_ERROR;
}
return VINF_SUCCESS;
}
{
/* NOTE(review): truncated fragment -- presumably the body of
 * rtR0MemObjNativeAllocPage(); the signature and the declarations /
 * calls producing pv, pMdl and pMemNt (pool allocation, MDL creation,
 * rtR0MemObjNew) are missing from this chunk. */
/*
* Try allocate the memory and create an MDL for them so
* we can query the physical addresses and do mappings later
* without running into out-of-memory conditions and similar problems.
*/
int rc = VERR_NO_PAGE_MEMORY;
if (pv)
{
if (pMdl)
{
#ifdef RT_ARCH_AMD64
#endif
/*
* Create the IPRT memory object.
*/
if (pMemNt)
{
return VINF_SUCCESS;
}
rc = VERR_NO_MEMORY;
}
/* The pool allocation succeeded but a later step failed: release it. */
ExFreePool(pv);
}
return rc;
}
{
/* NOTE(review): truncated fragment -- presumably the body of
 * rtR0MemObjNativeAllocLow(); the signature, the initial allocation call
 * producing rc, the per-page physical address check loop bounds, and the
 * MmAllocatePagesForMdl invocation are missing from this chunk. */
/*
* Try see if we get lucky first...
* (We could probably just assume we're lucky on NT4.)
*/
if (RT_SUCCESS(rc))
{
/* Verify every page is below the low-memory boundary; bail on the first miss. */
while (iPage-- > 0)
{
rc = VERR_NO_MEMORY;
break;
}
if (RT_SUCCESS(rc))
return rc;
/* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
RTR0MemObjFree(*ppMem, false);
}
#ifndef IPRT_TARGET_NT4
/*
* Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
*/
if (pMdl)
{
{
{
if (pv)
{
if (pMemNt)
{
/* Remember the allocation origin so rtR0MemObjNativeFree() can clean up. */
pMemNt->fAllocatedPagesForMdl = true;
return VINF_SUCCESS;
}
}
}
{
/* nothing */
}
}
}
#endif /* !IPRT_TARGET_NT4 */
/*
* Fall back on contiguous memory...
*/
}
/**
 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
 * to what rtR0MemObjNativeAllocCont() does.
 *
 * @returns IPRT status code.
 * @param ppMem Where to store the pointer to the ring-0 memory object.
 * @param cb The size.
 * @param fExecutable Whether the mapping should be executable or not.
 * @param PhysHighest The highest physical address for the pages in allocation.
 *
 * NOTE(review): the body below is a truncated fragment -- the statements
 * declaring and initializing pv, pMdl and pMemNt (presumably
 * MmAllocateContiguousMemory, IoAllocateMdl/MmBuildMdlForNonPagedPool and
 * rtR0MemObjNew) are missing from this chunk.
 */
static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest)
{
/*
* Allocate the memory and create an MDL for it.
*/
if (!pv)
return VERR_NO_MEMORY;
if (pMdl)
{
#ifdef RT_ARCH_AMD64
#endif
if (pMemNt)
{
return VINF_SUCCESS;
}
}
return VERR_NO_MEMORY;
}
{
/* NOTE(review): empty body fragment -- the enclosing function signature is
 * missing from this chunk. By position this looks like
 * rtR0MemObjNativeAllocCont() delegating to rtR0MemObjNativeAllocContEx(),
 * but that cannot be confirmed from the visible source. */
}
{
/* NOTE(review): truncated fragment -- presumably rtR0MemObjNativeAllocPhys();
 * the signature, the MmAllocatePagesForMdl call, the contiguity check whose
 * failure path is the bare "break;" below, and the fallback to
 * rtR0MemObjNativeAllocContEx are missing from this chunk. */
#ifndef IPRT_TARGET_NT4
/*
* Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
*
* This is preferable to using MmAllocateContiguousMemory because there are
* a few situations where the memory shouldn't be mapped, like for instance
* VT-x control memory. Since these are rather small allocations (one or
* two pages) MmAllocatePagesForMdl will probably be able to satisfy the
* request.
*
* If the allocation is big, the chances are *probably* not very good. The
* current limit is kind of random...
*/
{
if (pMdl)
{
{
break;
{
PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
if (pMemNt)
{
/* Record the allocation origin for the free path. */
pMemNt->fAllocatedPagesForMdl = true;
return VINF_SUCCESS;
}
}
}
}
}
#endif /* !IPRT_TARGET_NT4 */
}
{
/* NOTE(review): truncated fragment -- presumably rtR0MemObjNativeAllocPhysNC();
 * the signature and the MmAllocatePagesForMdl call producing pMdl are
 * missing from this chunk. Non-contiguous physical allocations are only
 * possible with MmAllocatePagesForMdl, hence the NT4 build returns
 * VERR_NOT_SUPPORTED. */
#ifndef IPRT_TARGET_NT4
if (pMdl)
{
{
PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
if (pMemNt)
{
/* Record the allocation origin for the free path. */
pMemNt->fAllocatedPagesForMdl = true;
return VINF_SUCCESS;
}
}
}
return VERR_NO_MEMORY;
#else /* IPRT_TARGET_NT4 */
return VERR_NOT_SUPPORTED;
#endif /* IPRT_TARGET_NT4 */
}
{
/* NOTE(review): truncated fragment -- presumably rtR0MemObjNativeEnterPhys();
 * the signature and the range-validation condition are missing. As written,
 * the unconditional return below makes the rest of the body unreachable:
 * the guarding "if" around VERR_ADDRESS_TOO_BIG was evidently dropped. */
/*
* Validate the address range and create a descriptor for it.
*/
return VERR_ADDRESS_TOO_BIG;
/*
* Create the IPRT memory object.
*/
PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
if (pMemNt)
{
return VINF_SUCCESS;
}
return VERR_NO_MEMORY;
}
/**
 * Internal worker for locking down pages.
 *
 * @return IPRT status code.
 *
 * @param ppMem Where to store the memory object pointer.
 * @param pv First page.
 * @param cb Number of bytes.
 * @param R0Process The process \a pv and \a cb refers to.
 *
 * NOTE(review): the function signature itself is missing from this chunk, as
 * are the declarations of cMdls, pMemNt, cbCur, pMdl and iMdl, and the
 * MmProbeAndLockPages / MmSecureVirtualMemory calls the surviving checks
 * clearly refer to.
 */
{
/*
* Calc the number of MDLs we need and allocate the memory object structure.
*/
/* Round up: a partial trailing chunk still needs its own MDL. */
if (cb % MAX_LOCK_MEM_SIZE)
cMdls++;
if (cMdls >= UINT32_MAX)
return VERR_OUT_OF_RANGE;
if (!pMemNt)
return VERR_NO_MEMORY;
/*
* Loop locking down the sub parts of the memory.
*/
int rc = VINF_SUCCESS;
{
/*
* Calc the Mdl size and allocate it.
*/
if (cbCur > MAX_LOCK_MEM_SIZE)
if (!pMdl)
{
rc = VERR_NO_MEMORY;
break;
}
/*
* Lock the pages.
*/
{
}
{
break;
}
if (R0Process != NIL_RTR0PROCESS )
{
/* Make sure the user process can't change the allocation. */
if (!pMemNt->pvSecureMem)
{
rc = VERR_NO_MEMORY;
break;
}
}
/* next */
}
if (RT_SUCCESS(rc))
{
return rc;
}
/*
* We failed, perform cleanups.
*/
/* Unlock and free every MDL that was successfully set up before the failure. */
while (iMdl-- > 0)
{
}
if (pMemNt->pvSecureMem)
{
}
return rc;
}
/**
 * Locks down a range of user-mode memory belonging to the calling process.
 *
 * @returns IPRT status code.
 * @param ppMem Where to store the memory object pointer.
 * @param R3Ptr The ring-3 address of the first page.
 * @param cb Number of bytes to lock.
 * @param R0Process The process the memory belongs to; must be the caller's.
 */
int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
{
    /* Only the current process is supported for now. */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    /* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
    /* The chunk as received fell off the end of a non-void function (undefined
       behavior); delegate to the common locking worker documented above. */
    return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, R0Process);
}
{
/* NOTE(review): empty body fragment -- the function signature is missing from
 * this chunk. By position this looks like rtR0MemObjNativeLockKernel()
 * delegating to the locking worker above, but that cannot be confirmed
 * from the visible source. */
}
/**
 * Reserves a range of kernel virtual address space.
 *
 * Not implemented on NT yet; a plausible implementation route would be
 * MmCreateSection(SEC_RESERVE) followed by MmMapViewInSystemSpace.
 *
 * @returns VERR_NOT_IMPLEMENTED.
 */
int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    /* Nothing implemented -- report the honest status to the caller. */
    return VERR_NOT_IMPLEMENTED;
}
/**
 * Reserves a range of virtual address space in a user process.
 *
 * @returns VERR_NOT_IMPLEMENTED -- reservation is not implemented on NT yet.
 */
int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
/*
* ZwCreateSection(SEC_RESERVE) + ZwMapViewOfSection perhaps?
* (The comment previously said "ZeCreateSection" -- no such API exists;
* Zw is the kernel-mode system-service prefix.)
*/
return VERR_NOT_IMPLEMENTED;
}
/**
 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
 *
 * @returns IPRT status code.
 * @param ppMem Where to store the memory object for the mapping.
 * @param pMemToMap The memory object to map.
 * @param pvFixed Where to map it. (void *)-1 if anywhere is fine.
 * @param uAlignment The alignment requirement for the mapping.
 * @param fProt The desired page protection for the mapping.
 * @param R0Process If NIL_RTR0PROCESS map into system (kernel) memory.
 * If not nil, it's the current process.
 *
 * NOTE(review): the parameter list below is cut off mid-declaration (it ends
 * on a comma), and the body is missing the pMemNtToMap declaration, the
 * MmMapLockedPages-style call the orphaned "FALSE" argument belongs to,
 * the Phys computation for the MmMapIoSpace branch, and the rtR0MemObjNew
 * calls producing pMemNt. This chunk appears truncated.
 */
static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
{
int rc = VERR_MAP_FAILED;
/*
* There are two basic cases here, either we've got an MDL and can
* map it using MmMapLockedPages, or we've got a contiguous physical
* range (MMIO most likely) and can use MmMapIoSpace.
*/
if (pMemNtToMap->cMdls)
{
/* don't attempt map locked regions with more than one mdl. */
return VERR_NOT_SUPPORTED;
/* we can't map anything to the first page, sorry. */
if (pvFixed == 0)
return VERR_NOT_SUPPORTED;
/* only one system mapping for now - no time to figure out MDL restrictions right now. */
&& R0Process == NIL_RTR0PROCESS)
return VERR_NOT_SUPPORTED;
{
/** @todo uAlignment */
/** @todo How to set the protection on the pages? */
FALSE /* no bug check on failure */,
if (pv)
{
if (pMemNt)
{
return VINF_SUCCESS;
}
rc = VERR_NO_MEMORY;
}
}
{
/* nothing */
}
}
else
{
/* cannot map phys mem to user space (yet). */
if (R0Process != NIL_RTR0PROCESS)
return VERR_NOT_SUPPORTED;
/** @todo uAlignment */
/** @todo How to set the protection on the pages? */
void *pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb, MmCached); /** @todo add cache type to fProt. */
if (pv)
{
if (pMemNt)
{
return VINF_SUCCESS;
}
rc = VERR_NO_MEMORY;
}
}
return rc;
}
/**
 * Maps a memory object into kernel (system) address space.
 *
 * @returns IPRT status code.
 * @param ppMem Where to store the memory object for the mapping.
 * @param pMemToMap The memory object to map.
 * @param pvFixed Where to map it; (void *)-1 if anywhere is fine.
 * @param uAlignment The alignment requirement for the mapping.
 * @param fProt The desired page protection for the mapping.
 */
int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment, unsigned fProt)
{
    /* The body was empty in the chunk as received (a non-void function falling
       off the end); delegate to the common worker, NIL_RTR0PROCESS selecting a
       system (kernel) mapping per that worker's contract. */
    return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, NIL_RTR0PROCESS);
}
/**
 * Maps a memory object into the address space of a user process.
 *
 * @returns IPRT status code.
 * @param ppMem Where to store the memory object for the mapping.
 * @param pMemToMap The memory object to map.
 * @param R3PtrFixed Ring-3 address to map at; (RTR3PTR)-1 if anywhere is fine.
 * @param uAlignment The alignment requirement for the mapping.
 * @param fProt The desired page protection for the mapping.
 * @param R0Process The target process; must be the calling process.
 */
int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    /* The worker only supports mapping into the current process. */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    /* The body was empty in the chunk as received; delegate to the common
       worker with the caller's process handle. */
    return rtR0MemObjNtMap(ppMem, pMemToMap, (void *)R3PtrFixed, uAlignment, fProt, R0Process);
}
{
/* NOTE(review): truncated fragment -- by the case labels and NIL_RTHCPHYS
 * return this is presumably rtR0MemObjNativeGetPagePhysAddr(); the
 * signature, the MDL page-frame lookup for the MDL-backed path, the switch
 * statement, and the per-type physical address computations are missing.
 * Only the shared "no physical address available" tail survives. */
{
{
}
}
{
case RTR0MEMOBJTYPE_MAPPING:
case RTR0MEMOBJTYPE_PHYS:
case RTR0MEMOBJTYPE_PAGE:
case RTR0MEMOBJTYPE_PHYS_NC:
case RTR0MEMOBJTYPE_LOW:
case RTR0MEMOBJTYPE_CONT:
case RTR0MEMOBJTYPE_LOCK:
default:
case RTR0MEMOBJTYPE_RES_VIRT:
/* No physical address can be produced for this object/page. */
return NIL_RTHCPHYS;
}
}