/* memobj-r0drv-freebsd.c revision d272a8d07d3f79b66760a49b8be4eae380ecd519 */
/* $Id$ */
/** @file
* IPRT - Ring-0 Memory Objects, FreeBSD.
*/
/*
* Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#include "the-freebsd-kernel.h"
/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
/**
 * The FreeBSD version of the memory object structure.
 *
 * NOTE(review): this chunk looks truncated -- the actual member
 * declarations (the core structure, the two VM object pointers), the
 * closing brace and the typedef/pointer-typedef names are missing after
 * the member comments below.  Restore them from upstream before building.
 */
typedef struct RTR0MEMOBJFREEBSD
{
/** The core structure. */
/** The VM object associated with the allocation. */
/** The VM object associated with the mapping.
 * In a mapping mem object, this is the shadow object?
 * In an allocation/enter mem object, this is the shared object we constructed (contig, perhaps alloc). */
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
/*
 * NOTE(review): the signature line for this body is missing from this
 * chunk; the RTR0MEMOBJTYPE switch plus the VINF_SUCCESS return suggest it
 * is rtR0MemObjNativeFree().  The switch expression itself and the cleanup
 * calls inside the empty brace pairs (unmap/deallocate/unwire style calls)
 * also appear lost -- restore from upstream; this will not compile as-is.
 */
{
int rc;
{
case RTR0MEMOBJTYPE_CONT:
/* Contiguous allocation: tear down the kernel mapping object if one exists. */
if (pMemFreeBSD->pMappingObject)
{
}
break;
case RTR0MEMOBJTYPE_PAGE:
/* Page allocation: backed either by a VM object or by a plain mapping. */
if (pMemFreeBSD->pObject)
{
}
else
{
if (pMemFreeBSD->pMappingObject)
{
}
}
break;
case RTR0MEMOBJTYPE_LOCK:
{
/* Unwire the previously wired pages.  The branch below presumably selects
   the user vs. kernel map -- its condition line is missing. */
int fFlags = VM_MAP_WIRE_NOHOLES;
{
}
else
fFlags);
break;
}
case RTR0MEMOBJTYPE_RES_VIRT:
{
/* Reserved address space; the vm_map_remove-style call is missing here. */
break;
}
case RTR0MEMOBJTYPE_MAPPING:
{
/* vm_map_remove will unmap the pages we inserted with pmap_enter */
break;
}
/* unused: */
case RTR0MEMOBJTYPE_LOW:
case RTR0MEMOBJTYPE_PHYS:
case RTR0MEMOBJTYPE_PHYS_NC:
default:
return VERR_INTERNAL_ERROR;
}
return VINF_SUCCESS;
}
/*
 * NOTE(review): signature missing from this chunk; the RTR0MEMOBJTYPE_PAGE
 * object creation suggests this is rtR0MemObjNativeAllocPage().  The heads
 * of the vm_object_allocate / vm_map_find / vm_map_wire calls are missing
 * (only their trailing argument lines survive) -- restore from upstream.
 */
{
int rc;
/* create the object. */
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PAGE, NULL, cb);
if (!pMemFreeBSD)
return VERR_NO_MEMORY;
/*
* We've two options here both expressed nicely by how kld allocates
* memory for the module bits:
*/
#if 0
{
return VINF_SUCCESS;
}
rc = VERR_NO_MEMORY;
#else
if (pMemFreeBSD->pObject)
{
/* NOTE(review): the map-insertion call this argument list belongs to is missing. */
0, /* offset */
&MapAddress, /* addr (IN/OUT) */
cb, /* length */
TRUE, /* find_space */
fExecutable /* protection */
: VM_PROT_RW,
VM_PROT_ALL, /* max(_prot) */
FALSE); /* cow (copy-on-write) */
if (rc == KERN_SUCCESS)
{
/* NOTE(review): wiring call head missing; on failure the range is removed below. */
MapAddress, /* start */
if (rc == KERN_SUCCESS)
{
return VINF_SUCCESS;
}
MapAddress + cb);
}
}
else
rc = VERR_NO_MEMORY;
#endif
return rc;
}
/*
 * NOTE(review): signature missing from this chunk; the "try Alloc first,
 * then contigmalloc" comment matches rtR0MemObjNativeAllocLow() upstream --
 * confirm.  The call that assigns rc, the iPage loop setup and the
 * per-page address check inside the loop are all missing.
 */
{
/*
 * Try an Alloc first and see if we get lucky; if not, try contigmalloc.
 * Might wish to try find our own pages or something later if this
 * turns into a problem spot on AMD64 boxes.
 */
if (RT_SUCCESS(rc))
{
/* NOTE(review): presumably verifying each page is below the limit; the
   check's condition line is missing, so the visible body frees and fails
   unconditionally on the first iteration. */
while (iPage-- > 0)
{
RTR0MemObjFree(*ppMem, false);
rc = VERR_NO_MEMORY;
break;
}
}
if (RT_FAILURE(rc))
return rc;
}
/*
 * NOTE(review): signature missing; RTR0MEMOBJTYPE_CONT creation suggests
 * rtR0MemObjNativeAllocCont().  The contigmalloc() call head (size, flags,
 * high-address bound) and the success-condition line are missing.
 */
{
/* create the object. */
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
if (!pMemFreeBSD)
return VERR_NO_MEMORY;
/* do the allocation. */
M_IPRTMOBJ, /* type */
0, /* lowest physical address */
PAGE_SIZE, /* alignment. */
0); /* boundary */
{
return VINF_SUCCESS;
}
return VERR_NO_MEMORY;
}
/*
 * NOTE(review): signature missing; a contiguous allocation bounded by
 * PhysHighest -- presumably rtR0MemObjNativeAllocPhys() (or the contig
 * path of AllocLow); verify against upstream.  The contigmalloc() call
 * head and the success-condition line are missing.
 */
{
/** @todo check if there is a more appropriate API somewhere.. */
/* create the object. */
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
if (!pMemFreeBSD)
return VERR_NO_MEMORY;
/* do the allocation. */
M_IPRTMOBJ, /* type */
0, /* lowest physical address */
PhysHighest, /* highest physical address */
PAGE_SIZE, /* alignment. */
0); /* boundary */
{
return VINF_SUCCESS;
}
return VERR_NO_MEMORY;
}
/*
 * NOTE(review): signature missing; per the @todo this stub belongs to one
 * of the physical-allocation entry points (AllocPhys or AllocPhysNC) --
 * confirm which against upstream.  Not implemented on FreeBSD here.
 */
{
/** @todo rtR0MemObjNativeAllocPhys / freebsd */
return VERR_NOT_SUPPORTED;
}
/*
 * NOTE(review): signature missing; creating an RTR0MEMOBJTYPE_PHYS object
 * with no backing allocation matches rtR0MemObjNativeEnterPhys() -- it
 * records a physical range to be mapped later.  The lines storing the
 * physical base into the object and publishing *ppMem appear missing.
 */
{
/* create the object. */
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
if (!pMemFreeBSD)
return VERR_NO_MEMORY;
/* there is no allocation here, it needs to be mapped somewhere first. */
return VINF_SUCCESS;
}
/**
 * Locks down (wires) a user-mode address range.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new memory object on success.
 * @param   R3Ptr       User (ring-3) address of the range to lock.
 * @param   cb          Size of the range in bytes.
 * @param   R0Process   The process whose address space R3Ptr belongs to.
 *
 * NOTE(review): the vm_map_wire() call that assigns rc is missing from
 * this chunk -- as shown, rc is read uninitialized at the KERN_SUCCESS
 * test.  The success branch also appears to be missing the line that
 * publishes the object via *ppMem.  Restore from upstream.
 */
int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
{
int rc;
/* create the object. */
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
if (!pMemFreeBSD)
return VERR_NO_MEMORY;
/*
 * We could've used vslock here, but we don't wish to be subject to
 * resource usage restrictions, so we'll call vm_map_wire directly.
 */
if (rc == KERN_SUCCESS)
{
return VINF_SUCCESS;
}
return VERR_NO_MEMORY;/** @todo fix mach -> vbox error conversion for freebsd. */
}
/*
 * NOTE(review): signature missing; locking a kernel pointer pv into an
 * RTR0MEMOBJTYPE_LOCK object matches rtR0MemObjNativeLockKernel().  As in
 * the user variant above, the vm_map_wire() call that assigns rc is
 * missing, so rc is read uninitialized at the KERN_SUCCESS test, and the
 * success branch lacks the *ppMem assignment.  Restore from upstream.
 */
{
int rc;
/* create the object. */
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, pv, cb);
if (!pMemFreeBSD)
return VERR_NO_MEMORY;
/* lock the memory */
if (rc == KERN_SUCCESS)
{
return VINF_SUCCESS;
}
return VERR_NO_MEMORY;/** @todo fix mach -> vbox error conversion for freebsd. */
}
/**
 * Worker for the two virtual address space reservers.
 *
 * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new memory object on success.
 * @param   pvFixed     Fixed address to reserve at, or (void *)-1 for any.
 * @param   cb          Number of bytes to reserve.
 * @param   uAlignment  Requested alignment (page granularity assumed).
 * @param   R0Process   NIL_RTR0PROCESS for kernel space, else the process.
 * @param   pMap        The VM map to reserve the range in.
 *
 * NOTE(review): several lines are missing from this chunk: the pvFixed
 * validation condition at the top is unterminated (the alignment/range
 * checks it joins with are gone), the vm_object_allocate() call, the
 * MapAddress declaration, the vm_map_remove() and map-insertion call
 * heads, and the user-space wiring call inside the success branch.
 * Restore from upstream; this will not compile as-is.
 */
int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process, vm_map_t pMap)
{
int rc;
/*
 * The pvFixed address range must be within the VM space when specified.
 */
if (pvFixed != (void *)-1
return VERR_INVALID_PARAMETER;
/*
 * Create the object.
 */
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
if (!pMemFreeBSD)
return VERR_NO_MEMORY;
/*
 * Allocate an empty VM object and map it into the requested map.
 */
if (pMemFreeBSD->pObject)
{
: vm_map_min(pMap);
if (pvFixed)
MapAddress + cb);
0, /* offset */
&MapAddress, /* addr (IN/OUT) */
cb, /* length */
VM_PROT_NONE, /* protection */
VM_PROT_ALL, /* max(_prot) ?? */
0); /* cow (copy-on-write) */
if (rc == KERN_SUCCESS)
{
if (R0Process != NIL_RTR0PROCESS)
{
MapAddress + cb,
}
return VINF_SUCCESS;
}
}
else
rc = VERR_NO_MEMORY;
return rc;
}
/**
 * Reserves kernel virtual address space.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new memory object on success.
 * @param   pvFixed     Fixed address to reserve at, or (void *)-1 for any.
 * @param   cb          Number of bytes to reserve.
 * @param   uAlignment  Requested alignment.
 *
 * NOTE(review): the original body was empty, so a non-void function fell
 * off its end (undefined behavior / garbage status returned).  Restored
 * the delegation to the common worker above, using the FreeBSD kernel_map
 * and no user process -- matches the worker's NIL_RTR0PROCESS contract.
 */
int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
    return rtR0MemObjNativeReserveInMap(ppMem, pvFixed, cb, uAlignment, NIL_RTR0PROCESS, kernel_map);
}
/**
 * Reserves user (ring-3) virtual address space in the given process.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new memory object on success.
 * @param   R3PtrFixed  Fixed user address to reserve at, or (RTR3PTR)-1 for any.
 * @param   cb          Number of bytes to reserve.
 * @param   uAlignment  Requested alignment.
 * @param   R0Process   The target process handle.
 *
 * NOTE(review): the original body was empty, so a non-void function fell
 * off its end (undefined behavior / garbage status returned).  Restored
 * the delegation to the common worker, resolving the process's vm_map.
 * Assumes RTR0PROCESS wraps a struct proc pointer on FreeBSD, as the
 * (struct proc *) usage elsewhere in this backend suggests -- confirm.
 */
int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeReserveInMap(ppMem, (void *)R3PtrFixed, cb, uAlignment, R0Process,
                                        &((struct proc *)R0Process)->p_vmspace->vm_map);
}
/**
 * Maps an existing memory object into kernel space.
 *
 * Currently not implemented on FreeBSD (always VERR_NOT_IMPLEMENTED).
 *
 * NOTE(review): the parameter list is cut short in this chunk (it ends
 * after uAlignment with a trailing comma -- the protection/offset
 * parameters are missing).  The #if 0 draft below still references OS/2
 * identifiers (RTErrConvertFromOS2, RTR0MEMOBJOS2, pMemToMapOs2), i.e. it
 * was copied from the OS/2 backend and never ported.
 */
int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
{
/* Phys: see pmap_mapdev in i386/i386/pmap.c (http://fxr.watson.org/fxr/source/i386/i386/pmap.c?v=RELENG62#L2860) */
#if 0
/** @todo finish the implementation. */
int rc;
{
/*
 * These has kernel mappings.
 */
case RTR0MEMOBJTYPE_PAGE:
case RTR0MEMOBJTYPE_LOW:
case RTR0MEMOBJTYPE_CONT:
break;
case RTR0MEMOBJTYPE_PHYS_NC:
case RTR0MEMOBJTYPE_PHYS:
if (!pvR0)
{
/* no ring-0 mapping, so allocate a mapping in the process. */
if (rc)
return RTErrConvertFromOS2(rc);
}
break;
case RTR0MEMOBJTYPE_LOCK:
return VERR_NOT_SUPPORTED; /** @todo implement this... */
break;
case RTR0MEMOBJTYPE_RES_VIRT:
case RTR0MEMOBJTYPE_MAPPING:
default:
return VERR_INTERNAL_ERROR;
}
/*
 * Create a dummy mapping object for it.
 *
 * any cache options, so sharing is ok. And the main memory object
 * isn't actually freed until all the mappings have been freed up
 * (reference counting).
 */
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING, pvR0, pMemToMapOs2->Core.cb);
if (pMemFreeBSD)
{
return VINF_SUCCESS;
}
return VERR_NO_MEMORY;
#endif
return VERR_NOT_IMPLEMENTED;
}
/**
 * Maps an existing memory object into the current process's user space.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new mapping memory object.
 * @param   pMemToMap   The memory object to map.
 * @param   R3PtrFixed  Requested user address (fixed) or (RTR3PTR)-1.
 * @param   uAlignment  Requested alignment.
 * @param   fProt       RTMEM_PROT_* protection mask.
 * @param   R0Process   Target process; must be the current one (asserted).
 *
 * NOTE(review): many lines are missing from this chunk -- the pProc and
 * pObjectToMap/pObjectNew declarations, the protection-flag computation,
 * both vm_map_insert/vm_map_find call heads, the per-page pmap_enter (and
 * its pre-8.0 variant) inside the #if/#else, the else-branch statement at
 * the bottom of the first arm, the vm_object_reference() call, the
 * rtR0MemObjNew() call head in the success path and the final deallocate.
 * Restore from upstream; this will not compile as-is.
 */
int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
int rc;
/* calc protection */
vm_prot_t ProtectionFlags = 0;
/* calc mapping address */
vm_offset_t AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
/*
 * Mapping into R3 is easy if the mem object has a associated VM object.
 * If there is not such an object we have to get it from the address.
 */
if (!pObjectToMap)
{
/* NOTE(review): branch-hint placement looks inverted -- upstream style is
   RT_UNLIKELY(!pObjectNew); logically equivalent but the hint is wrong. */
if (!RT_UNLIKELY(pObjectNew))
return VERR_NO_MEMORY;
/* Insert the object in the map. */
pObjectNew , /* Object to map */
0, /* Start offset in the object */
TRUE, /* Whether a suitable address should be searched for first */
ProtectionFlags, /* protection flags */
VM_PROT_ALL, /* Maximum protection flags */
0); /* Copy on write */
if (rc == KERN_SUCCESS)
{
/* Insert the memory page by page into the mapping. */
while (cLeft-- > 0)
{
#if __FreeBSD_version >= 800002
#else
#endif
}
}
else
}
else
{
/*
 * Reference the object. If this isn't done the object will removed from kernel space
 * if the mapping is destroyed.
 */
pObjectToMap, /* Object to map */
0, /* Start offset in the object */
TRUE, /* Whether a suitable address should be searched for first */
ProtectionFlags, /* protection flags */
VM_PROT_ALL, /* Maximum protection flags */
0); /* Copy on write */
}
if (rc == KERN_SUCCESS)
{
/*
 * Create a mapping object for it.
 */
(void *)AddrR3,
if (pMemFreeBSD)
{
return VINF_SUCCESS;
}
}
if (pObjectToMap)
return VERR_NO_MEMORY;
}
/*
 * NOTE(review): signature missing; a switch over RTR0MEMOBJTYPE returning
 * NIL_RTHCPHYS suggests this is rtR0MemObjNativeGetPagePhysAddr() --
 * resolving the host-physical address of a page in the object.  The
 * switch expression and the actual pmap/vtophys lookup inside the
 * PAGE/MAPPING arm are missing; as written that arm falls through to the
 * cases below.  Restore from upstream.
 */
{
{
case RTR0MEMOBJTYPE_LOCK:
{
/* later */
return NIL_RTHCPHYS;
}
/* fall thru*/
case RTR0MEMOBJTYPE_PAGE:
case RTR0MEMOBJTYPE_MAPPING:
{
}
case RTR0MEMOBJTYPE_CONT:
case RTR0MEMOBJTYPE_PHYS:
case RTR0MEMOBJTYPE_PHYS_NC:
case RTR0MEMOBJTYPE_RES_VIRT:
case RTR0MEMOBJTYPE_LOW:
default:
return NIL_RTHCPHYS;
}
}