memobj-r0drv-darwin.cpp revision 8990b255e0affb03cf695fdf2ec381e67887a97d
/* $Id$ */
/** @file
* IPRT - Ring-0 Memory Objects, Darwin.
*/
/*
* Copyright (C) 2006-2007 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#include "the-darwin-kernel.h"
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h>
#endif
/*#define USE_VM_MAP_WIRE - may re-enable later when non-mapped allocations are added. */
/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
/**
* The Darwin version of the memory object structure.
*/
typedef struct RTR0MEMOBJDARWIN
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Pointer to the memory descriptor created for allocated and locked memory. */
    IOMemoryDescriptor *pMemDesc;
    /** Pointer to the memory mapping object for mapped memory. */
    IOMemoryMap        *pMemMap;
} RTR0MEMOBJDARWIN, *PRTR0MEMOBJDARWIN;
/**
* HACK ALERT!
*
 * Touch the pages to force the kernel to create the page
 * table entries. This is necessary since the kernel gets
 * upset if we take a page fault when preemption is disabled
 * and/or interrupts are disabled when taking the traps; weird stuff.
*
* @param pv Pointer to the first page.
* @param cb The number of bytes.
*/
static void rtR0MemObjDarwinTouchPages(void *pv, size_t cb)
{
    uint32_t volatile *pu32 = (uint32_t volatile *)pv;
    for (;;)
    {
        ASMAtomicCmpXchgU32(pu32, 0xdeadbeef, 0xdeadbeef);
        if (cb <= PAGE_SIZE)
            break;
        cb -= PAGE_SIZE;
        pu32 += PAGE_SIZE / sizeof(uint32_t);
    }
}
/**
* Gets the virtual memory map the specified object is mapped into.
*
* @returns VM map handle on success, NULL if no map.
* @param pMem The memory object.
*/
static vm_map_t rtR0MemObjDarwinGetMap(PRTR0MEMOBJINTERNAL pMem)
{
    switch (pMem->enmType)
    {
case RTR0MEMOBJTYPE_PAGE:
case RTR0MEMOBJTYPE_LOW:
case RTR0MEMOBJTYPE_CONT:
return kernel_map;
case RTR0MEMOBJTYPE_PHYS:
case RTR0MEMOBJTYPE_PHYS_NC:
return NULL; /* pretend these have no mapping atm. */
case RTR0MEMOBJTYPE_LOCK:
case RTR0MEMOBJTYPE_RES_VIRT:
case RTR0MEMOBJTYPE_MAPPING:
default:
return NULL;
}
}
#if 0 /* not necessary after all*/
/* My vm_map mockup. */
struct my_vm_map
{
struct my_vm_map_header
{
struct vm_map_links
{
void *prev;
void *next;
} links;
int nentries;
} hdr;
};
/**
* Gets the minimum map address, this is similar to get_map_min.
*
* @returns The start address of the map.
* @param pMap The map.
*/
{
    /* lazy discovery of the correct offset. The Apple guys are a wonderfully secretive bunch. */
{
{
break;
}
}
/* calculate it. */
}
#endif /* unused */
#ifdef RT_STRICT
/**
* Read from a physical page.
*
* @param HCPhys The address to start reading at.
* @param cb How many bytes to read.
* @param pvDst Where to put the bytes. This is zero'd on failure.
*/
static void rtR0MemObjDarwinReadPhys(RTHCPHYS HCPhys, size_t cb, void *pvDst)
{
    memset(pvDst, '\0', cb);

    IOAddressRange      aRanges[1] = { { (mach_vm_address_t)HCPhys, cb } };
    IOMemoryDescriptor *pMemDesc   = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
                                                                           kIODirectionIn, NULL /*task*/);
    if (pMemDesc)
    {
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
        IOMemoryMap *pMemMap = pMemDesc->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
#else
        IOMemoryMap *pMemMap = pMemDesc->map(kernel_task, 0, kIOMapAnywhere | kIOMapDefaultCache);
#endif
        if (pMemMap)
        {
            void const *pvSrc = (void const *)(uintptr_t)pMemMap->getVirtualAddress();
            memcpy(pvDst, pvSrc, cb);
            pMemMap->release();
        }
        else
            printf("rtR0MemObjDarwinReadPhys: mapping the descriptor failed!\n");

        pMemDesc->release();
    }
    else
        printf("rtR0MemObjDarwinReadPhys: withAddressRanges failed!\n");
}
/**
* Gets the PTE for a page.
*
* @returns the PTE.
* @param pvPage The virtual address to get the PTE for.
*/
static uint64_t rtR0MemObjDarwinGetPTE(void *pvPage)
{
    RTUINT64U   u64;
    RTCCUINTREG cr3 = ASMGetCR3();
    RTCCUINTREG cr4 = ASMGetCR4();
    bool        fPAE = false;
    bool        fLMA = false;
    if (cr4 & X86_CR4_PAE)
    {
        fPAE = true;
        uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
        if (fAmdFeatures & RT_BIT_32(29) /* long mode */)
        {
            if (ASMRdMsr(MSR_K6_EFER) & MSR_K6_EFER_LMA)
                fLMA = true;
        }
    }
    if (fLMA)
    {
        /* PML4 */
        rtR0MemObjDarwinReadPhys((cr3 & ~(RTCCUINTREG)PAGE_OFFSET_MASK) | (((uint64_t)(uintptr_t)pvPage >> 39) & 0x1ff) * 8, 8, &u64);
        if (!(u64.u & X86_PML4E_P))
            return 0;
        /* PDPTR */
        rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 30) & 0x1ff) * 8, 8, &u64);
        if (!(u64.u & X86_PDPE_P))
            return 0;
        /* PD */
        rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 21) & 0x1ff) * 8, 8, &u64);
        if (!(u64.u & X86_PDE_P))
            return 0;
        /* PT */
        rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x1ff) * 8, 8, &u64);
        if (!(u64.u & X86_PTE_P))
            return 0;
        return u64.u;
}
    if (fPAE)
    {
        /* PDPTR */
        rtR0MemObjDarwinReadPhys((cr3 & 0xffffffe0 /*X86_CR3_PAE_PAGE_MASK*/) | (((uintptr_t)pvPage >> 30) & 0x3) * 8, 8, &u64);
        if (!(u64.u & X86_PDPE_P))
            return 0;
        /* PD */
        rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 21) & 0x1ff) * 8, 8, &u64);
        if (!(u64.u & X86_PDE_P))
            return 0;
        /* PT */
        rtR0MemObjDarwinReadPhys((u64.u & ~(uint64_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x1ff) * 8, 8, &u64);
        if (!(u64.u & X86_PTE_P))
            return 0;
        return u64.u;
}
    /* PD */
    rtR0MemObjDarwinReadPhys((cr3 & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 22) & 0x3ff) * 4, 4, &u64);
    if (!(u64.au32[0] & X86_PDE_P))
        return 0;
    /* PT */
    rtR0MemObjDarwinReadPhys((u64.au32[0] & ~(uint32_t)PAGE_OFFSET_MASK) | (((uintptr_t)pvPage >> 12) & 0x3ff) * 4, 4, &u64);
    if (!(u64.au32[0] & X86_PTE_P))
        return 0;
    return u64.au32[0];
}
#endif /* RT_STRICT */
DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;

    /*
     * Release the IOMemoryDescriptor and/or IOMemoryMap associated with the object.
     */
    if (pMemDarwin->pMemDesc)
    {
        pMemDarwin->pMemDesc->complete();
        pMemDarwin->pMemDesc->release();
        pMemDarwin->pMemDesc = NULL;
    }
    if (pMemDarwin->pMemMap)
    {
        pMemDarwin->pMemMap->release();
        pMemDarwin->pMemMap = NULL;
    }
/*
* Release any memory that we've allocated or locked.
*/
    switch (pMemDarwin->Core.enmType)
    {
case RTR0MEMOBJTYPE_LOW:
case RTR0MEMOBJTYPE_PAGE:
case RTR0MEMOBJTYPE_CONT:
break;
case RTR0MEMOBJTYPE_LOCK:
{
#ifdef USE_VM_MAP_WIRE
            vm_map_t Map = pMemDarwin->Core.u.Lock.R0Process != NIL_RTR0PROCESS
                         ? get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process)
                         : kernel_map;
            kern_return_t kr = vm_map_unwire(Map, (vm_map_offset_t)pMemDarwin->Core.pv,
                                             (vm_map_offset_t)pMemDarwin->Core.pv + pMemDarwin->Core.cb,
                                             0 /* not user */);
#endif
break;
}
case RTR0MEMOBJTYPE_PHYS:
/*if (pMemDarwin->Core.u.Phys.fAllocated)
IOFreePhysical(pMemDarwin->Core.u.Phys.PhysBase, pMemDarwin->Core.cb);*/
break;
case RTR0MEMOBJTYPE_PHYS_NC:
AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
return VERR_INTERNAL_ERROR;
case RTR0MEMOBJTYPE_RES_VIRT:
AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
return VERR_INTERNAL_ERROR;
case RTR0MEMOBJTYPE_MAPPING:
/* nothing to do here. */
break;
default:
return VERR_INTERNAL_ERROR;
}
return VINF_SUCCESS;
}
/**
* Kernel memory alloc worker that uses inTaskWithPhysicalMask.
*
* @returns IPRT status code.
* @retval VERR_ADDRESS_TOO_BIG try another way.
*
* @param ppMem Where to return the memory object.
* @param cb The page aligned memory size.
* @param fExecutable Whether the mapping needs to be executable.
* @param fContiguous Whether the backing memory needs to be contiguous.
 * @param       PhysMask        The mask for the backing memory (i.e. range). Use 0 if
 *                              you don't care that much or are speculating.
* @param MaxPhysAddr The max address to verify the result against. Use
* UINT64_MAX if it doesn't matter.
* @param enmType The object type.
*/
static int rtR0MemObjNativeAllocWorker(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, bool fContiguous,
                                       mach_vm_address_t PhysMask, uint64_t MaxPhysAddr, RTR0MEMOBJTYPE enmType)
{
/*
* Try inTaskWithPhysicalMask first, but since we don't quite trust that it
* actually respects the physical memory mask (10.5.x is certainly busted),
* we'll use rtR0MemObjNativeAllocCont as a fallback for dealing with that.
*
* The kIOMemoryKernelUserShared flag just forces the result to be page aligned.
*/
#if 1 /** @todo Figure out why this is broken. Is it only on snow leopard? Seen allocating memory for the VM structure, last page corrupted or inaccessible. */
    size_t const cbFudged = cb + PAGE_SIZE;
#else
    size_t const cbFudged = cb;
#endif
    int rc;
    IOBufferMemoryDescriptor *pMemDesc =
        IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
                                                         kIODirectionInOut | kIOMemoryKernelUserShared
                                                         | (fContiguous ? kIOMemoryPhysicallyContiguous : 0),
                                                         cbFudged, PhysMask);
if (pMemDesc)
    {
        IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
        if (IORet == kIOReturnSuccess)
        {
            void *pv = pMemDesc->getBytesNoCopy(0, cbFudged);
            if (pv)
{
/*
* Check if it's all below 4GB.
*/
{
#ifdef __LP64__ /* Grumble! */
                    addr64_t Addr = pMemDesc->getPhysicalSegment(off, NULL, kIOMemoryMapperNone);
#else
                    addr64_t Addr = pMemDesc->getPhysicalSegment64(off, NULL);
#endif
if ( Addr > MaxPhysAddr
|| !Addr
|| (Addr & PAGE_OFFSET_MASK)
|| ( fContiguous
&& !off
{
/* Buggy API, try allocate the memory another way. */
if (PhysMask)
LogAlways(("rtR0MemObjNativeAllocWorker: off=%x Addr=%llx AddrPrev=%llx MaxPhysAddr=%llx PhysMas=%llx - buggy API!\n",
return VERR_ADDRESS_TOO_BIG;
}
}
#ifdef RT_STRICT
/* check that the memory is actually mapped. */
//addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
//printf("rtR0MemObjNativeAllocWorker: pv=%p %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
#endif
/*
* Create the IPRT memory object.
*/
PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), enmType, pv, cb);
if (pMemDarwin)
{
if (fContiguous)
{
#ifdef __LP64__ /* Grumble! */
                        addr64_t PhysBase64 = pMemDesc->getPhysicalSegment(0, NULL, kIOMemoryMapperNone);
#else
                        addr64_t PhysBase64 = pMemDesc->getPhysicalSegment64(0, NULL);
#endif
                        RTHCPHYS PhysBase = PhysBase64; Assert(PhysBase == PhysBase64);
                        if (enmType == RTR0MEMOBJTYPE_CONT)
                            pMemDarwin->Core.u.Cont.Phys = PhysBase;
                        else if (enmType == RTR0MEMOBJTYPE_PHYS)
                            pMemDarwin->Core.u.Phys.PhysBase = PhysBase;
                        else
                            AssertMsgFailed(("enmType=%d\n", enmType));
}
#if 0 /* Experimental code. */
if (fExecutable)
rc = rtR0MemObjNativeProtect(&pMemDarwin->Core, 0, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC);
else
#endif
rc = VINF_SUCCESS;
                    if (RT_SUCCESS(rc))
                    {
                        pMemDarwin->pMemDesc = pMemDesc;
                        *ppMem = &pMemDarwin->Core;
                        return VINF_SUCCESS;
                    }

                    rtR0MemObjDelete(&pMemDarwin->Core);
                }
                if (enmType == RTR0MEMOBJTYPE_PHYS_NC)
                    rc = VERR_NO_PHYS_MEMORY;
                else if (enmType == RTR0MEMOBJTYPE_LOW)
                    rc = VERR_NO_LOW_MEMORY;
                else if (enmType == RTR0MEMOBJTYPE_CONT)
                    rc = VERR_NO_CONT_MEMORY;
                else
                    rc = VERR_NO_MEMORY;
            }
            else
                rc = VERR_MEMOBJ_INIT_FAILED;

            pMemDesc->complete();
        }
        else
            rc = RTErrConvertFromDarwinIO(IORet);

        pMemDesc->release();
    }
    else
        rc = VERR_MEMOBJ_INIT_FAILED;
    return rc;
}
DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    return rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /*fContiguous*/, 0 /*PhysMask*/, UINT64_MAX, RTR0MEMOBJTYPE_PAGE);
}
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
/*
* Try IOMallocPhysical/IOMallocAligned first.
* Then try optimistically without a physical address mask, which will always
* end up using IOMallocAligned.
*
* (See bug comment in the worker and IOBufferMemoryDescriptor::initWithPhysicalMask.)
*/
    int rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /*fContiguous*/, ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
    if (rc == VERR_ADDRESS_TOO_BIG)
        rc = rtR0MemObjNativeAllocWorker(ppMem, cb, fExecutable, false /*fContiguous*/, 0 /*PhysMask*/, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_LOW);
    return rc;
}
DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
/*
* Workaround for bogus IOKernelAllocateContiguous behavior, just in case.
* cb <= PAGE_SIZE allocations take a different path, using a different allocator.
*/
    int rc = rtR0MemObjNativeAllocWorker(ppMem, RT_MAX(cb, PAGE_SIZE + 1) /* see note above */, fExecutable, true /*fContiguous*/, ~(uint32_t)PAGE_OFFSET_MASK, _4G - PAGE_SIZE, RTR0MEMOBJTYPE_CONT);
    return rc;
}
DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
/** @todo alignment */
if (uAlignment != PAGE_SIZE)
return VERR_NOT_SUPPORTED;
/*
* Translate the PhysHighest address into a mask.
*/
int rc;
    if (PhysHighest == NIL_RTHCPHYS)
        rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /*fExecutable*/, true /*fContiguous*/, 0 /*PhysMask*/, UINT64_MAX, RTR0MEMOBJTYPE_PHYS);
    else
    {
        mach_vm_address_t PhysMask = ~(mach_vm_address_t)0;
        while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
            PhysMask >>= 1;
        PhysMask &= ~(mach_vm_address_t)PAGE_OFFSET_MASK;
        rc = rtR0MemObjNativeAllocWorker(ppMem, cb, false /*fExecutable*/, true /*fContiguous*/, PhysMask, PhysHighest, RTR0MEMOBJTYPE_PHYS);
    }
return rc;
}
DECLHIDDEN(int) rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
/** @todo rtR0MemObjNativeAllocPhys / darwin.
* This might be a bit problematic and may very well require having to create our own
* object which we populate with pages but without mapping it into any address space.
* Estimate is 2-3 days.
*/
return VERR_NOT_SUPPORTED;
}
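/*
 * Sketch (not from the original sources) of one conceivable shape for the @todo
 * above: allocate non-contiguous backing pages with
 * IOBufferMemoryDescriptor::inTaskWithPhysicalMask and keep only the descriptor,
 * never mapping it anywhere. Untested and hypothetical: the helper name, the
 * option flags and the error codes are assumptions, so it is left disabled.
 */
#if 0
static int rtR0MemObjDarwinAllocPhysNCSketch(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    /* Translate PhysHighest into a physical address mask, like rtR0MemObjNativeAllocPhys does. */
    mach_vm_address_t PhysMask = 0;
    if (PhysHighest != NIL_RTHCPHYS)
    {
        PhysMask = ~(mach_vm_address_t)0;
        while (PhysMask > (PhysHighest | PAGE_OFFSET_MASK))
            PhysMask >>= 1;
        PhysMask &= ~(mach_vm_address_t)PAGE_OFFSET_MASK;
    }

    /* Allocate and wire the pages without asking for a contiguous range. */
    IOBufferMemoryDescriptor *pMemDesc =
        IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, kIODirectionInOut, cb, PhysMask);
    if (!pMemDesc)
        return VERR_NO_PHYS_MEMORY;
    IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
    if (IORet != kIOReturnSuccess)
    {
        pMemDesc->release();
        return RTErrConvertFromDarwinIO(IORet);
    }

    /* Record only the descriptor; pv stays NULL since there is no mapping. */
    PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS_NC, NULL, cb);
    if (!pMemDarwin)
    {
        pMemDesc->complete();
        pMemDesc->release();
        return VERR_NO_MEMORY;
    }
    pMemDarwin->pMemDesc = pMemDesc;
    *ppMem = &pMemDarwin->Core;
    return VINF_SUCCESS;
}
#endif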
DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
/*
     * Create a descriptor for it (the validation is always true on Intel Macs, but
     * as it doesn't harm us we keep it in).
*/
    int rc = VERR_ADDRESS_TOO_BIG;
    IOAddressRange aRanges[1] = { { Phys, cb } };
    if (aRanges[0].address == Phys && aRanges[0].length == cb)
    {
        IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRanges(&aRanges[0], RT_ELEMENTS(aRanges),
                                                                             kIODirectionInOut, NULL /*task*/);
if (pMemDesc)
{
#ifdef __LP64__ /* Grumble! */
#else
#endif
/*
* Create the IPRT memory object.
*/
PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_PHYS, NULL, cb);
            if (pMemDarwin)
            {
                pMemDarwin->Core.u.Phys.PhysBase   = Phys;
                pMemDarwin->Core.u.Phys.fAllocated = false;
                pMemDarwin->pMemDesc = pMemDesc;
                *ppMem = &pMemDarwin->Core;
                return VINF_SUCCESS;
            }
rc = VERR_NO_MEMORY;
}
        else
            rc = VERR_MEMOBJ_INIT_FAILED;
}
    else
        rc = VERR_ADDRESS_TOO_BIG;
return rc;
}
/**
* Internal worker for locking down pages.
*
* @return IPRT status code.
*
* @param ppMem Where to store the memory object pointer.
* @param pv First page.
* @param cb Number of bytes.
* @param fAccess The desired access, a combination of RTMEM_PROT_READ
* and RTMEM_PROT_WRITE.
* @param Task The task \a pv and \a cb refers to.
*/
static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, task_t Task)
{
#ifdef USE_VM_MAP_WIRE
/*
* First try lock the memory.
*/
int rc = VERR_LOCK_FAILED;
    kern_return_t kr = vm_map_wire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb,
                                   VM_PROT_DEFAULT, 0 /* not user */);
if (kr == KERN_SUCCESS)
{
/*
* Create the IPRT memory object.
*/
PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
if (pMemDarwin)
        {
            *ppMem = &pMemDarwin->Core;
            return VINF_SUCCESS;
        }
kr = vm_map_unwire(get_task_map(Task), (vm_map_offset_t)pv, (vm_map_offset_t)pv + cb, 0 /* not user */);
rc = VERR_NO_MEMORY;
}
#else
/*
* Create a descriptor and try lock it (prepare).
*/
int rc = VERR_MEMOBJ_INIT_FAILED;
IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withAddressRange((vm_address_t)pv, cb, kIODirectionInOut, Task);
if (pMemDesc)
    {
        IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
        if (IORet == kIOReturnSuccess)
{
/*
* Create the IPRT memory object.
*/
PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_LOCK, pv, cb);
            if (pMemDarwin)
            {
                pMemDarwin->Core.u.Lock.R0Process = Task == kernel_task ? NIL_RTR0PROCESS : (RTR0PROCESS)Task;
                pMemDarwin->pMemDesc = pMemDesc;
                *ppMem = &pMemDarwin->Core;
                return VINF_SUCCESS;
            }
            rc = VERR_NO_MEMORY;
            pMemDesc->complete();
        }
        else
            rc = RTErrConvertFromDarwinIO(IORet);
        pMemDesc->release();
    }
#endif
return rc;
}
DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, fAccess, (task_t)R0Process);
}
DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    return rtR0MemObjNativeLock(ppMem, pv, cb, fAccess, kernel_task);
}
DECLHIDDEN(int) rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
return VERR_NOT_SUPPORTED;
}
DECLHIDDEN(int) rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
return VERR_NOT_SUPPORTED;
}
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
/*
* Check that the specified alignment is supported.
*/
if (uAlignment > PAGE_SIZE)
return VERR_NOT_SUPPORTED;
/*
* Must have a memory descriptor that we can map.
*/
    PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
    int rc = VERR_INVALID_PARAMETER;
    if (pMemToMapDarwin->pMemDesc)
{
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask(kernel_task,
                                                                              0,
                                                                              kIOMapAnywhere | kIOMapDefaultCache,
                                                                              offSub,
                                                                              cbSub);
#else
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map(kernel_task,
                                                              0,
                                                              kIOMapAnywhere | kIOMapDefaultCache,
                                                              offSub,
                                                              cbSub);
#endif
if (pMemMap)
        {
            void *pv = (void *)(uintptr_t)pMemMap->getVirtualAddress();
            if (pv)
            {
//addr64_t Addr = pMemToMapDarwin->pMemDesc->getPhysicalSegment64(offSub, NULL);
//printf("pv=%p: %8llx %8llx\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr);
// /*
// * Explicitly lock it so that we're sure it is present and that
// * its PTEs cannot be recycled.
// * Note! withAddressRange() doesn't work as it adds kIOMemoryTypeVirtual64
// * to the options which causes prepare() to not wire the pages.
// * This is probably a bug.
// */
// IOAddressRange Range = { (mach_vm_address_t)pv, cbSub };
// IOMemoryDescriptor *pMemDesc = IOMemoryDescriptor::withOptions(&Range,
// 1 /* count */,
// 0 /* offset */,
// kernel_task,
// kIODirectionInOut | kIOMemoryTypeVirtual,
// kIOMapperSystem);
// if (pMemDesc)
// {
// IOReturn IORet = pMemDesc->prepare(kIODirectionInOut);
// if (IORet == kIOReturnSuccess)
// {
/* HACK ALERT! */
/** @todo First, the memory should've been mapped by now, and second, it
             *  should have the wired attribute in the PTE (bit 9). Neither
             *  seems to be the case. The disabled locking code doesn't make any
             *  difference, which is extremely odd, and breaks
             *  rtR0MemObjNativeGetPagePhysAddr (getPhysicalSegment64 -> 64) for the
             *  lock descriptor. */
//addr64_t Addr = pMemDesc->getPhysicalSegment64(0, NULL);
//printf("pv=%p: %8llx %8llx (%d)\n", pv, rtR0MemObjDarwinGetPTE(pv), Addr, 2);
/*
* Create the IPRT memory object.
*/
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
                                                                                pv, cbSub ? cbSub : pMemToMapDarwin->Core.cb);
                if (pMemDarwin)
                {
                    pMemDarwin->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
                    pMemDarwin->pMemMap = pMemMap;
                    // pMemDarwin->pMemDesc = pMemDesc;
                    *ppMem = &pMemDarwin->Core;
                    return VINF_SUCCESS;
                }
                rc = VERR_NO_MEMORY;
// pMemDesc->complete();
// rc = VERR_NO_MEMORY;
// }
// else
// rc = RTErrConvertFromDarwinIO(IORet);
// pMemDesc->release();
// }
// else
// rc = VERR_MEMOBJ_INIT_FAILED;
            }
            else
                rc = VERR_MAP_FAILED;
            pMemMap->release();
        }
        else
            rc = VERR_MAP_FAILED;
    }
return rc;
}
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
/*
* Check for unsupported things.
*/
if (uAlignment > PAGE_SIZE)
return VERR_NOT_SUPPORTED;
/*
* Must have a memory descriptor.
*/
    PRTR0MEMOBJDARWIN pMemToMapDarwin = (PRTR0MEMOBJDARWIN)pMemToMap;
    int rc = VERR_INVALID_PARAMETER;
    if (pMemToMapDarwin->pMemDesc)
{
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->createMappingInTask((task_t)R0Process,
                                                                              0,
                                                                              kIOMapAnywhere | kIOMapDefaultCache,
                                                                              0 /* offset */,
                                                                              0 /* length */);
#else
        IOMemoryMap *pMemMap = pMemToMapDarwin->pMemDesc->map((task_t)R0Process,
                                                              0,
                                                              kIOMapAnywhere | kIOMapDefaultCache);
#endif
if (pMemMap)
        {
            void *pv = (void *)(uintptr_t)pMemMap->getVirtualAddress();
            if (pv)
            {
/*
* Create the IPRT memory object.
*/
                PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)rtR0MemObjNew(sizeof(*pMemDarwin), RTR0MEMOBJTYPE_MAPPING,
                                                                                pv, pMemToMapDarwin->Core.cb);
                if (pMemDarwin)
                {
                    pMemDarwin->Core.u.Mapping.R0Process = R0Process;
                    pMemDarwin->pMemMap = pMemMap;
                    *ppMem = &pMemDarwin->Core;
                    return VINF_SUCCESS;
                }
rc = VERR_NO_MEMORY;
            }
            else
                rc = VERR_MAP_FAILED;
            pMemMap->release();
        }
        else
            rc = VERR_MAP_FAILED;
    }
return rc;
}
DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    /* Get the map for the object. */
    vm_map_t pVmMap = rtR0MemObjDarwinGetMap(pMem);
    if (!pVmMap)
return VERR_NOT_SUPPORTED;
    /* Convert the protection. */
    vm_prot_t fMachProt;
    switch (fProt)
    {
        case RTMEM_PROT_NONE:
            fMachProt = VM_PROT_NONE;
            break;
        case RTMEM_PROT_READ:
            fMachProt = VM_PROT_READ;
            break;
        case RTMEM_PROT_READ | RTMEM_PROT_WRITE:
            fMachProt = VM_PROT_READ | VM_PROT_WRITE;
            break;
        case RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
            fMachProt = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
            break;
        case RTMEM_PROT_WRITE | RTMEM_PROT_EXEC:
            fMachProt = VM_PROT_WRITE | VM_PROT_EXECUTE;
            break;
        case RTMEM_PROT_EXEC:
            fMachProt = VM_PROT_EXECUTE;
            break;
        default:
            AssertFailedReturn(VERR_INVALID_PARAMETER);
    }
    /* do the job. */
    vm_offset_t Start = (uintptr_t)pMem->pv + offSub;
    kern_return_t krc = vm_protect(pVmMap, Start, cbSub,
                                   false /* set_maximum */, fMachProt);
if (krc != KERN_SUCCESS)
return RTErrConvertFromDarwinKern(krc);
return VINF_SUCCESS;
}
DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    RTHCPHYS PhysAddr; PRTR0MEMOBJDARWIN pMemDarwin = (PRTR0MEMOBJDARWIN)pMem;
#ifdef USE_VM_MAP_WIRE
/*
* Locked memory doesn't have a memory descriptor and
* needs to be handled differently.
*/
{
else
{
/*
             * From what I can tell, Apple seems to have locked up all the
             * available interfaces that could help us obtain the pmap_t of a task
             * or vm_map_t.
             * So, we'll have to figure out where in the vm_map_t structure it is
             * and read it ourselves. ASSUMING that kernel_pmap is pointed to by
* kernel_map->pmap, we scan kernel_map to locate the structure offset.
* Not nice, but it will hopefully do the job in a reliable manner...
*
* (get_task_pmap, get_map_pmap or vm_map_pmap is what we really need btw.)
*/
static int s_offPmap = -1;
            if (s_offPmap == -1)
            {
                pmap_t const *p    = (pmap_t const *)kernel_map;
                pmap_t const *pEnd = p + 64;
                for (; p < pEnd; p++)
                    if (*p == kernel_pmap)
                    {
                        s_offPmap = (uintptr_t)p - (uintptr_t)kernel_map;
                        break;
                    }
                AssertReturn(s_offPmap >= 0, NIL_RTHCPHYS);
            }
pmap_t Pmap = *(pmap_t *)((uintptr_t)get_task_map((task_t)pMemDarwin->Core.u.Lock.R0Process) + s_offPmap);
}
}
else
#endif /* USE_VM_MAP_WIRE */
    {
        /*
         * Get the memory descriptor.
         */
        IOMemoryDescriptor *pMemDesc = pMemDarwin->pMemDesc;
        if (!pMemDesc)
            pMemDesc = pMemDarwin->pMemMap->getMemoryDescriptor();
        AssertReturn(pMemDesc, NIL_RTHCPHYS);
/*
* If we've got a memory descriptor, use getPhysicalSegment64().
*/
#ifdef __LP64__ /* Grumble! */
        addr64_t Addr = pMemDesc->getPhysicalSegment(iPage * PAGE_SIZE, NULL, kIOMemoryMapperNone);
#else
        addr64_t Addr = pMemDesc->getPhysicalSegment64(iPage * PAGE_SIZE, NULL);
#endif
        PhysAddr = Addr;
AssertMsgReturn(PhysAddr == Addr, ("PhysAddr=%RHp Addr=%RX64\n", PhysAddr, (uint64_t)Addr), NIL_RTHCPHYS);
}
return PhysAddr;
}