memobj-r0drv-freebsd.c revision 00cc5d93f7446d9394a0f6b7ad790b9fb9d6005c
/* $Id$ */
/** @file
* IPRT - Ring-0 Memory Objects, FreeBSD.
*/
/*
* Copyright (c) 2007 knut st. osmundsen <bird-src-spam@anduin.net>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#include "the-freebsd-kernel.h"
/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
/**
* The FreeBSD version of the memory object structure.
*/
typedef struct RTR0MEMOBJFREEBSD
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Type dependent data */
    union
    {
        /** Non physical memory allocations */
        struct
        {
            /** The VM object associated with the allocation. */
            vm_object_t     pObject;
        } NonPhys;
        /** Physical memory allocations */
        struct
        {
            /** Number of pages */
            uint32_t        cPages;
            /** Array of pages - variable */
            vm_page_t       apPages[1];
        } Phys;
    } u;
} RTR0MEMOBJFREEBSD, *PRTR0MEMOBJFREEBSD;
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
    int rc;

    switch (pMemFreeBSD->Core.enmType)
    {
case RTR0MEMOBJTYPE_CONT:
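            /* Contiguous allocations come from contigmalloc(); a sketch of the matching
               release call (M_IPRTMOBJ is assumed to be the malloc type used below). */
            contigfree(pMemFreeBSD->Core.pv, pMemFreeBSD->Core.cb, M_IPRTMOBJ);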
break;
case RTR0MEMOBJTYPE_PAGE:
{
}
else
{
}
break;
        case RTR0MEMOBJTYPE_LOCK:
        {
            int      fFlags = VM_MAP_WIRE_NOHOLES;
            vm_map_t pMap   = kernel_map;
            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
            {
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;
                fFlags |= VM_MAP_WIRE_USER;
            }
            else
                fFlags |= VM_MAP_WIRE_SYSTEM;
            rc = vm_map_unwire(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb,
                               fFlags);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }
case RTR0MEMOBJTYPE_RES_VIRT:
{
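            /* Sketch: tear the reservation out of the map it was created in (kernel or
               process map); the member names used here are assumptions. */
            vm_map_t pMap = kernel_map;
            if (pMemFreeBSD->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.ResVirt.R0Process)->p_vmspace->vm_map;
            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);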
break;
}
case RTR0MEMOBJTYPE_MAPPING:
{
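            /* Sketch: like the RES_VIRT case above, remove the mapping from the map it lives in. */
            vm_map_t pMap = kernel_map;
            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process)->p_vmspace->vm_map;
            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);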
break;
}
case RTR0MEMOBJTYPE_PHYS:
case RTR0MEMOBJTYPE_PHYS_NC:
{
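            /* Sketch: hand back the individually tracked pages (the exact free call and
               locking are assumptions, matching the allocation sketch further down). */
            vm_page_lock_queues();
            for (uint32_t iPage = 0; iPage < pMemFreeBSD->u.Phys.cPages; iPage++)
                vm_page_free(pMemFreeBSD->u.Phys.apPages[iPage]);
            vm_page_unlock_queues();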
break;
}
/* unused: */
case RTR0MEMOBJTYPE_LOW:
default:
return VERR_INTERNAL_ERROR;
}
return VINF_SUCCESS;
}
int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    int rc;
    size_t cPages = cb >> PAGE_SHIFT;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PAGE, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* Back the allocation with an anonymous VM object and map it into the kernel map
       (the object type and protection choice are assumptions). */
    pMemFreeBSD->u.NonPhys.pObject = vm_object_allocate(OBJT_DEFAULT, cPages);
    if (pMemFreeBSD->u.NonPhys.pObject)
    {
        vm_offset_t MapAddress = vm_map_min(kernel_map);
        rc = vm_map_find(kernel_map,                        /* map */
                         pMemFreeBSD->u.NonPhys.pObject,    /* object */
                         0,                                 /* offset */
                         &MapAddress,                       /* addr (IN/OUT) */
                         cb,                                /* length */
                         TRUE,                              /* find_space */
                         fExecutable                        /* protection */
                         ? VM_PROT_ALL
                         : VM_PROT_RW,
VM_PROT_ALL, /* max(_prot) */
FALSE); /* cow (copy-on-write) */
if (rc == KERN_SUCCESS)
{
rc = VINF_SUCCESS;
            /* Allocate and wire in the backing pages one at a time (request flags assumed). */
            for (size_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage = vm_page_alloc(pMemFreeBSD->u.NonPhys.pObject, iPage,
                                                VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY);
                if (!pPage)
                {
/*
* Out of pages
* Remove already allocated pages
*/
                    while (iPage-- > 0)
                    {
                        pPage = vm_page_lookup(pMemFreeBSD->u.NonPhys.pObject, iPage);
                        vm_page_unwire(pPage, 0);
                        vm_page_free(pPage);
                    }
rc = VERR_NO_MEMORY;
break;
}
}
        if (rc == VINF_SUCCESS)
        {
            /* Look up the map entry covering the new range so its pages can be entered
               into the page tables (a sketch; the wiring details are assumptions). */
            vm_map_entry_t pMapEntry;
            boolean_t fEntryFound = vm_map_lookup_entry(kernel_map, MapAddress, &pMapEntry);
            if (fEntryFound)
            {
                /* Put the page into the page table now. */
                vm_offset_t AddressDst = MapAddress;
                for (size_t iPage = 0; iPage < cPages; iPage++)
                {
                    vm_page_t pPage = vm_page_lookup(pMemFreeBSD->u.NonPhys.pObject, iPage);
#if __FreeBSD_version >= 701105
                    pmap_enter(kernel_map->pmap, AddressDst, VM_PROT_NONE, pPage,
                               fExecutable
                               ? VM_PROT_ALL
                               : VM_PROT_RW,
                               TRUE);
#else
                    pmap_enter(kernel_map->pmap, AddressDst, pPage,
                               fExecutable
                               ? VM_PROT_ALL
                               : VM_PROT_RW,
                               TRUE);
#endif
AddressDst += PAGE_SIZE;
}
                /* Store start address */
                pMemFreeBSD->Core.pv = (void *)MapAddress;
                *ppMem = &pMemFreeBSD->Core;
                return VINF_SUCCESS;
}
else
{
AssertFailed();
}
}
}
}
else
rc = VERR_NO_MEMORY;
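    /* Failure: release the IPRT memory object again. */
    rtR0MemObjDelete(&pMemFreeBSD->Core);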
return rc;
}
int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /*
     * Try a page alloc first and see if we get lucky, if not try contigmalloc.
     * Might wish to try find our own pages or something later if this
     * turns into a problem spot on AMD64 boxes.
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        /* Give the block up if any page sits above 4GB and fall back below. */
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) > (_4G - PAGE_SIZE))
            {
                RTR0MemObjFree(*ppMem, false);
                *ppMem = NULL;
rc = VERR_NO_MEMORY;
break;
}
}
    if (RT_FAILURE(rc))
        rc = rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
    return rc;
}
int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
/* create the object. */
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
if (!pMemFreeBSD)
return VERR_NO_MEMORY;
    /* do the allocation. */
    pMemFreeBSD->Core.pv = contigmalloc(cb,                  /* size */
                                        M_IPRTMOBJ,          /* type */
                                        M_NOWAIT | M_ZERO,   /* flags */
                                        0,                   /* lowest physical address */
                                        _4G - 1,             /* highest physical address */
                                        PAGE_SIZE,           /* alignment. */
                                        0);                  /* boundary */
    if (pMemFreeBSD->Core.pv)
    {
        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
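    /* Failure: tear down the half-created object (rtR0MemObjDelete is the standard
       counterpart to rtR0MemObjNew in the r0drv backends). */
    rtR0MemObjDelete(&pMemFreeBSD->Core);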
return VERR_NO_MEMORY;
}
static int rtR0MemObjFreeBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                           size_t cb, RTHCPHYS PhysHighest, size_t uAlignment,
                                           bool fContiguous)
{
    int rc = VINF_SUCCESS;
    uint32_t cPages = cb >> PAGE_SHIFT;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJFREEBSD, u.Phys.apPages[cPages]),
                                                                       enmType, NULL, cb);
if (!pMemFreeBSD)
return VERR_NO_MEMORY;
    pMemFreeBSD->u.Phys.cPages = cPages;

    vm_paddr_t VmPhysAddrHigh;
    if (PhysHighest != NIL_RTHCPHYS)
        VmPhysAddrHigh = PhysHighest;
    else
        VmPhysAddrHigh = ~(vm_paddr_t)0;
    if (fContiguous)
    {
        /* One contiguous run of pages (a sketch: the allocator call and the way the
           page array is filled in are assumptions). */
        vm_page_t pPage = vm_phys_alloc_contig(cPages, 0, VmPhysAddrHigh, uAlignment, 0);
        if (pPage)
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
                pMemFreeBSD->u.Phys.apPages[iPage] = &pPage[iPage];
        else
            rc = VERR_NO_MEMORY;
    }
else
{
        /* Allocate page by page (a sketch; the allocator and rollback calls are assumptions) */
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            vm_page_t pPage = vm_phys_alloc_contig(1, 0, VmPhysAddrHigh, PAGE_SIZE, 0);
            if (!pPage)
            {
                /* Free all allocated pages */
                while (iPage-- > 0)
                    vm_page_free(pMemFreeBSD->u.Phys.apPages[iPage]);
                rc = VERR_NO_MEMORY;
                break;
            }
            pMemFreeBSD->u.Phys.apPages[iPage] = pPage;
        }
}
    if (RT_FAILURE(rc))
        rtR0MemObjDelete(&pMemFreeBSD->Core);
    else
    {
        if (enmType == RTR0MEMOBJTYPE_PHYS)
        {
            /* A PHYS object advertises a contiguous base address; the member usage here
               follows the other r0drv backends. */
            pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(pMemFreeBSD->u.Phys.apPages[0]);
            pMemFreeBSD->Core.u.Phys.fAllocated = true;
        }
        *ppMem = &pMemFreeBSD->Core;
    }
return rc;
}
int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
#if 0
return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS, cb, PhysHighest, uAlignment, true);
#else
/* create the object. */
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_CONT, NULL, cb);
if (!pMemFreeBSD)
return VERR_NO_MEMORY;
    /* do the allocation. */
    pMemFreeBSD->Core.pv = contigmalloc(cb,                  /* size */
                                        M_IPRTMOBJ,          /* type */
                                        M_NOWAIT | M_ZERO,   /* flags */
                                        0,                   /* lowest physical address */
                                        PhysHighest,         /* highest physical address */
                                        uAlignment,          /* alignment. */
                                        0);                  /* boundary */
    if (pMemFreeBSD->Core.pv)
    {
        pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
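    /* Failure: tear down the half-created object before reporting out-of-memory. */
    rtR0MemObjDelete(&pMemFreeBSD->Core);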
return VERR_NO_MEMORY;
#endif
}
int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
#if 0
return rtR0MemObjFreeBSDAllocPhysPages(ppMem, RTR0MEMOBJTYPE_PHYS_NC, cb, PhysHighest, PAGE_SIZE, false);
#else
return VERR_NOT_SUPPORTED;
#endif
}
int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb)
{
/* create the object. */
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
if (!pMemFreeBSD)
return VERR_NO_MEMORY;
/* there is no allocation here, it needs to be mapped somewhere first. */
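    /* Record the physical range in the core structure; the member usage follows the
       pattern of the other r0drv backends. */
    pMemFreeBSD->Core.u.Phys.fAllocated = false;
    pMemFreeBSD->Core.u.Phys.PhysBase = Phys;
    *ppMem = &pMemFreeBSD->Core;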
return VINF_SUCCESS;
}
int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
int rc;
/* create the object. */
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
if (!pMemFreeBSD)
return VERR_NO_MEMORY;
/*
* We could've used vslock here, but we don't wish to be subject to
* resource usage restrictions, so we'll call vm_map_wire directly.
*/
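    /* Wire the user range directly; the flag choice below is an assumption. */
    rc = vm_map_wire(&((struct proc *)R0Process)->p_vmspace->vm_map,    /* the map */
                     (vm_offset_t)R3Ptr,                                /* start */
                     (vm_offset_t)R3Ptr + cb,                           /* end */
                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);           /* flags */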
if (rc == KERN_SUCCESS)
    {
        pMemFreeBSD->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
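    rtR0MemObjDelete(&pMemFreeBSD->Core);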
return VERR_NO_MEMORY;/** @todo fix mach -> vbox error conversion for freebsd. */
}
int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
int rc;
/* create the object. */
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, pv, cb);
if (!pMemFreeBSD)
return VERR_NO_MEMORY;
/* lock the memory */
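    /* Wire the kernel range down; the flag choice below is an assumption. */
    rc = vm_map_wire(kernel_map,                                /* the map */
                     (vm_offset_t)pv,                           /* start */
                     (vm_offset_t)pv + cb,                      /* end */
                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); /* flags */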
if (rc == KERN_SUCCESS)
    {
        /* Kernel memory: no owning process. */
        pMemFreeBSD->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
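    rtR0MemObjDelete(&pMemFreeBSD->Core);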
return VERR_NO_MEMORY;/** @todo fix mach -> vbox error conversion for freebsd. */
}
/**
* Worker for the two virtual address space reservers.
*
* We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
*/
static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process, vm_map_t pMap)
{
int rc;
/*
* The pvFixed address range must be within the VM space when specified.
*/
    if (   pvFixed != (void *)-1
        && (   (vm_offset_t)pvFixed < vm_map_min(pMap)
            || (vm_offset_t)pvFixed + cb > vm_map_max(pMap)))
        return VERR_INVALID_PARAMETER;
/*
* Check that the specified alignment is supported.
*/
if (uAlignment > PAGE_SIZE)
return VERR_NOT_SUPPORTED;
/*
* Create the object.
*/
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
if (!pMemFreeBSD)
return VERR_NO_MEMORY;
/*
* Allocate an empty VM object and map it into the requested map.
*/
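    /* An anonymous, pageless VM object is enough to hold the reservation
       (OBJT_DEFAULT is an assumption). */
    pMemFreeBSD->u.NonPhys.pObject = vm_object_allocate(OBJT_DEFAULT, cb >> PAGE_SHIFT);
    if (pMemFreeBSD->u.NonPhys.pObject)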
{
        vm_offset_t MapAddress = pvFixed != (void *)-1
                               ? (vm_offset_t)pvFixed
                               : vm_map_min(pMap);
        if (pvFixed != (void *)-1)
            vm_map_remove(pMap,                          /* clear out whatever is there already */
                          MapAddress,
                          MapAddress + cb);

        rc = vm_map_find(pMap,                           /* map */
                         pMemFreeBSD->u.NonPhys.pObject, /* object */
                         0,                              /* offset */
                         &MapAddress,                    /* addr (IN/OUT) */
                         cb,                             /* length */
                         pvFixed == (void *)-1,          /* find_space */
                         VM_PROT_NONE,                   /* protection */
VM_PROT_ALL, /* max(_prot) ?? */
0); /* cow (copy-on-write) */
if (rc == KERN_SUCCESS)
{
            if (R0Process != NIL_RTR0PROCESS)
            {
                /* Mark user reservations inheritable on fork (this call is an assumption). */
                rc = vm_map_inherit(pMap,
                                    MapAddress,
                                    MapAddress + cb,
                                    VM_INHERIT_SHARE);
                AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
            }
            pMemFreeBSD->Core.pv = (void *)MapAddress;
            pMemFreeBSD->Core.u.ResVirt.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
}
}
else
rc = VERR_NO_MEMORY;
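    rtR0MemObjDelete(&pMemFreeBSD->Core);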
return rc;
}
int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
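    /* Kernel reservations go into the kernel map. */
    return rtR0MemObjNativeReserveInMap(ppMem, pvFixed, cb, uAlignment, NIL_RTR0PROCESS, kernel_map);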
}
int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
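    /* User reservations go into the calling process' map. */
    return rtR0MemObjNativeReserveInMap(ppMem, (void *)R3PtrFixed, cb, uAlignment, R0Process,
                                        &((struct proc *)R0Process)->p_vmspace->vm_map);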
}
int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
/*
* Check that the specified alignment is supported.
*/
if (uAlignment > PAGE_SIZE)
return VERR_NOT_SUPPORTED;
/* Phys: see pmap_mapdev in i386/i386/pmap.c (http://fxr.watson.org/fxr/source/i386/i386/pmap.c?v=RELENG62#L2860) */
/** @todo finish the implementation. */
return VERR_NOT_IMPLEMENTED;
}
int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
/*
* Check for unsupported stuff.
*/
AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
if (uAlignment > PAGE_SIZE)
return VERR_NOT_SUPPORTED;
    int rc;
    struct proc *pProc = (struct proc *)R0Process;
/* calc protection */
vm_prot_t ProtectionFlags = 0;
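    /* Translate the IPRT protection flags; this follows the usual r0drv pattern. */
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;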
/* calc mapping address */
vm_offset_t AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
    /* A fresh VM object backs the user mapping (the OBJT_PHYS object type is an assumption). */
    vm_object_t pObjectNew = vm_object_allocate(OBJT_PHYS, pMemToMap->cb >> PAGE_SHIFT);
    if (RT_UNLIKELY(!pObjectNew))
return VERR_NO_MEMORY;
    /* Insert the object in the map. */
    rc = vm_map_find(&pProc->p_vmspace->vm_map,     /* Map to insert the object in */
                     pObjectNew,                    /* Object to map */
                     0,                             /* Start offset in the object */
                     &AddrR3,                       /* Start address IN/OUT */
                     pMemToMap->cb,                 /* Size of the mapping */
                     TRUE,                          /* Whether a suitable address should be searched for first */
ProtectionFlags, /* protection flags */
VM_PROT_ALL, /* Maximum protection flags */
0); /* Copy on write */
/* Map the memory page by page into the destination map. */
if (rc == KERN_SUCCESS)
    {
        PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;

        if (   pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS
            || pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS_NC)
        {
/* Mapping physical allocations */
/* Insert the memory page by page into the mapping. */
{
#if __FreeBSD_version >= 701105
#else
#endif
}
        }
        else if (pMemToMapFreeBSD->u.NonPhys.pObject)
        {
/* Mapping page memory object */
/* Insert the memory page by page into the mapping. */
{
#if __FreeBSD_version >= 701105
#else
#endif
}
}
else
{
/* Mapping cont or low memory types */
{
#if __FreeBSD_version >= 701105
#else
#endif
}
}
}
if (RT_SUCCESS(rc))
{
/*
* Create a mapping object for it.
*/
        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
                                                                           RTR0MEMOBJTYPE_MAPPING,
                                                                           (void *)AddrR3,
                                                                           pMemToMap->cb);
if (pMemFreeBSD)
        {
            pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
}
}
    return VERR_NO_MEMORY; /** @todo fix mach -> vbox error conversion for freebsd. */
}
/* Changing protections after the fact is not implemented for FreeBSD. */
int rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
return VERR_NOT_SUPPORTED;
}
RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    switch (pMem->enmType)
    {
case RTR0MEMOBJTYPE_LOCK:
{
            if (    pMem->u.Lock.R0Process != NIL_RTR0PROCESS
                &&  pMem->u.Lock.R0Process != (RTR0PROCESS)curproc)
            {
/* later */
return NIL_RTHCPHYS;
}
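            /* fall thru */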
}
case RTR0MEMOBJTYPE_PAGE:
{
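            /* Kernel virtual address: translate directly (a sketch; assumes pv holds a
               valid kernel mapping of the object). */
            vm_offset_t pb = (vm_offset_t)pMem->pv + (iPage << PAGE_SHIFT);
            return vtophys(pb);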
}
case RTR0MEMOBJTYPE_MAPPING:
        {
            vm_offset_t pb = (vm_offset_t)pMem->pv + (iPage << PAGE_SHIFT);
            if (pMem->u.Mapping.R0Process != NIL_RTR0PROCESS)
            {
                /* User mapping: ask the process pmap (a sketch; the exact calls are assumptions). */
                struct proc *pProc = (struct proc *)pMem->u.Mapping.R0Process;
                return pmap_extract(vm_map_pmap(&pProc->p_vmspace->vm_map), pb);
            }
            return vtophys(pb);
        }
case RTR0MEMOBJTYPE_CONT:
case RTR0MEMOBJTYPE_PHYS:
case RTR0MEMOBJTYPE_PHYS_NC:
{
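            /* A sketch: contiguous objects derive the address from their base, while a
               non-contiguous PHYS_NC object has to consult its page array. */
            if (pMem->enmType == RTR0MEMOBJTYPE_CONT)
                return pMem->u.Cont.Phys + (iPage << PAGE_SHIFT);
            if (pMem->enmType == RTR0MEMOBJTYPE_PHYS)
                return pMem->u.Phys.PhysBase + (iPage << PAGE_SHIFT);
            return VM_PAGE_TO_PHYS(((PRTR0MEMOBJFREEBSD)pMem)->u.Phys.apPages[iPage]);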
}
case RTR0MEMOBJTYPE_RES_VIRT:
case RTR0MEMOBJTYPE_LOW:
default:
return NIL_RTHCPHYS;
}
}