/* $Id$ */
/** @file
* IPRT - Ring-0 Memory Objects, Haiku.
*/
/*
* Copyright (C) 2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-haiku-kernel.h"
/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
* The Haiku version of the memory object structure.
*/
typedef struct RTR0MEMOBJHAIKU
{
    /** The core structure. */
    RTR0MEMOBJINTERNAL  Core;
    /** Area identifier. */
    area_id             AreaId;
} RTR0MEMOBJHAIKU;
/** Pointer to the Haiku memory object. */
typedef RTR0MEMOBJHAIKU *PRTR0MEMOBJHAIKU;

//MALLOC_DEFINE(M_IPRTMOBJ, "iprtmobj", "IPRT - R0MemObj");
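
/*
 * Note: each backing allocation below is a Haiku kernel "area"; keeping the
 * area_id in the object is what lets rtR0MemObjNativeFree() tear everything
 * down again with delete_area().
 */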
#if 0
/**
* Gets the virtual memory map the specified object is mapped into.
*
* @returns VM map handle on success, NULL if no map.
* @param pMem The memory object.
*/
static vm_map_t rtR0MemObjHaikuGetMap(PRTR0MEMOBJINTERNAL pMem)
{
    switch (pMem->enmType)
    {
case RTR0MEMOBJTYPE_PAGE:
case RTR0MEMOBJTYPE_LOW:
case RTR0MEMOBJTYPE_CONT:
return kernel_map;
case RTR0MEMOBJTYPE_PHYS:
case RTR0MEMOBJTYPE_PHYS_NC:
return NULL; /* pretend these have no mapping atm. */
case RTR0MEMOBJTYPE_LOCK:
case RTR0MEMOBJTYPE_RES_VIRT:
case RTR0MEMOBJTYPE_MAPPING:
default:
return NULL;
}
}
#endif
int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)pMem;

    switch (pMemHaiku->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
        case RTR0MEMOBJTYPE_MAPPING:
        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            /* These are all (at most) backed by an area; delete it if one was
               actually created. */
            if (pMemHaiku->AreaId > -1)
                delete_area(pMemHaiku->AreaId);
            break;
        }

        case RTR0MEMOBJTYPE_LOCK:
        {
            /* Undo the lock_memory_etc() done by rtR0MemObjNativeLockInMap(). */
            team_id TeamId = B_SYSTEM_TEAM;
            if (pMemHaiku->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                TeamId = (team_id)pMemHaiku->Core.u.Lock.R0Process;
            unlock_memory_etc(TeamId, pMemHaiku->Core.pv, pMemHaiku->Core.cb, B_READ_DEVICE);
            break;
        }

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            /* Address space reservation is not implemented yet, so there is
               nothing to undo here. */
            break;
        }

        default:
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}
static int rtR0MemObjNativeAllocArea(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTR0MEMOBJTYPE type,
                                     RTHCPHYS PhysHighest, size_t uAlignment)
{
    NOREF(fExecutable);

    int         rc;
    void       *pvMap       = NULL;
    const char *pszName     = NULL;
    uint32      addressSpec = B_ANY_KERNEL_ADDRESS;
    uint32      fLock       = ~0U;
    LogFlowFunc(("ppMem=%p cb=%u, fExecutable=%s, type=%08x, PhysHighest=%RX64 uAlignment=%u\n", ppMem, (unsigned)cb,
                 fExecutable ? "true" : "false", type, PhysHighest, (unsigned)uAlignment));
switch (type)
{
case RTR0MEMOBJTYPE_PAGE:
pszName = "IPRT R0MemObj Alloc";
fLock = B_FULL_LOCK;
break;
        case RTR0MEMOBJTYPE_LOW:
            pszName = "IPRT R0MemObj AllocLow";
            fLock = B_32_BIT_FULL_LOCK;
            break;
        case RTR0MEMOBJTYPE_CONT:
            pszName = "IPRT R0MemObj AllocCont";
            fLock = B_32_BIT_CONTIGUOUS;
            break;
#if 0
case RTR0MEMOBJTYPE_MAPPING:
pszName = "IPRT R0MemObj Mapping";
fLock = B_FULL_LOCK;
break;
#endif
case RTR0MEMOBJTYPE_PHYS:
/** @todo alignment */
if (uAlignment != PAGE_SIZE)
return VERR_NOT_SUPPORTED;
/** @todo r=ramshankar: no 'break' here?? */
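            /* Note: the fall-through into PHYS_NC below looks deliberate, as
               both types want the same area name and locking mode, but it is
               flagged above nonetheless. */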
        case RTR0MEMOBJTYPE_PHYS_NC:
            pszName = "IPRT R0MemObj AllocPhys";
            fLock   = (PhysHighest < _4G ? B_LOMEM : B_CONTIGUOUS);
            break;
#if 0
case RTR0MEMOBJTYPE_LOCK:
break;
#endif
default:
return VERR_INTERNAL_ERROR;
}
    /* Create the object. */
    PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(RTR0MEMOBJHAIKU), type, NULL, cb);
    if (RT_UNLIKELY(!pMemHaiku))
        return VERR_NO_MEMORY;
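
    /* create_area() hands back the new area_id on success and a negative
       error code on failure, so the assignment below doubles as the status
       check. */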
rc = pMemHaiku->AreaId = create_area(pszName, &pvMap, addressSpec, cb, fLock, B_READ_AREA | B_WRITE_AREA);
    if (pMemHaiku->AreaId >= 0)
    {
        physical_entry physMap[2];
        pMemHaiku->Core.pv = pvMap;   /* Store the start address. */
        switch (type)
        {
            case RTR0MEMOBJTYPE_CONT:
                /* Look up the physical base of the contiguous allocation. */
                rc = get_memory_map(pvMap, cb, physMap, 2);
                if (rc == B_OK)
                    pMemHaiku->Core.u.Cont.Phys = physMap[0].address;
                break;

            case RTR0MEMOBJTYPE_PHYS:
            case RTR0MEMOBJTYPE_PHYS_NC:
            {
                rc = get_memory_map(pvMap, cb, physMap, 2);
                if (rc == B_OK)
                {
                    pMemHaiku->Core.u.Phys.PhysBase   = physMap[0].address;
                    pMemHaiku->Core.u.Phys.fAllocated = true;
                }
                break;
            }

            default:
                break;
        }

        if (rc >= B_OK)
        {
            *ppMem = &pMemHaiku->Core;
            return VINF_SUCCESS;
        }
        delete_area(pMemHaiku->AreaId);
    }

    rtR0MemObjDelete(&pMemHaiku->Core);
    return RTErrConvertFromHaikuKernReturn(rc);
}
int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    return rtR0MemObjNativeAllocArea(ppMem, cb, fExecutable, RTR0MEMOBJTYPE_PAGE, 0 /* PhysHighest */, 0 /* uAlignment */);
}
int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    return rtR0MemObjNativeAllocArea(ppMem, cb, fExecutable, RTR0MEMOBJTYPE_LOW, 0 /* PhysHighest */, 0 /* uAlignment */);
}
int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    return rtR0MemObjNativeAllocArea(ppMem, cb, fExecutable, RTR0MEMOBJTYPE_CONT, 0 /* PhysHighest */, 0 /* uAlignment */);
}
int rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    return rtR0MemObjNativeAllocArea(ppMem, cb, false /* fExecutable */, RTR0MEMOBJTYPE_PHYS, PhysHighest, uAlignment);
}

int rtR0MemObjNativeAllocPhysNC(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest)
{
    return rtR0MemObjNativeAllocPhys(ppMem, cb, PhysHighest, PAGE_SIZE);
}
int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
    LogFlowFunc(("ppMem=%p Phys=%RHp cb=%u uCachePolicy=%x\n", ppMem, Phys, (unsigned)cb, uCachePolicy));
/* Create the object. */
PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(*pMemHaiku), RTR0MEMOBJTYPE_PHYS, NULL, cb);
if (!pMemHaiku)
return VERR_NO_MEMORY;
    /* There is no allocation here, it needs to be mapped somewhere first. */
    pMemHaiku->AreaId                   = -1;
    pMemHaiku->Core.u.Phys.fAllocated   = false;
    pMemHaiku->Core.u.Phys.PhysBase     = Phys;
    pMemHaiku->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemHaiku->Core;
    return VINF_SUCCESS;
}
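
/* Note: objects entered above keep AreaId == -1, which is what steers
   rtR0MemObjNativeMapKernel() to its map_physical_memory() path. */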
/**
* Worker locking the memory in either kernel or user maps.
*
* @returns IPRT status code.
* @param ppMem Where to store the allocated memory object.
* @param pvStart The starting address.
* @param cb The size of the block.
* @param fAccess The mapping protection to apply.
* @param R0Process The process to map the memory to (use NIL_RTR0PROCESS
* for the kernel)
* @param fFlags Memory flags (B_READ_DEVICE indicates the memory is
* intended to be written from a "device").
*/
static int rtR0MemObjNativeLockInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvStart, size_t cb, uint32_t fAccess,
                                     RTR0PROCESS R0Process, int fFlags)
{
    NOREF(fAccess);
    int rc;
    team_id TeamId = B_SYSTEM_TEAM;

    LogFlowFunc(("ppMem=%p pvStart=%p cb=%u fAccess=%x R0Process=%d fFlags=%x\n", ppMem, pvStart, cb, fAccess, R0Process,
                 fFlags));
/* Create the object. */
PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(*pMemHaiku), RTR0MEMOBJTYPE_LOCK, pvStart, cb);
if (RT_UNLIKELY(!pMemHaiku))
return VERR_NO_MEMORY;
    if (R0Process != NIL_RTR0PROCESS)
        TeamId = (team_id)R0Process;

    /* lock_memory_etc() pins the given range in the team's address space. */
    rc = lock_memory_etc(TeamId, pvStart, cb, fFlags);
    if (rc == B_OK)
    {
        pMemHaiku->AreaId = -1;
        pMemHaiku->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemHaiku->Core;
        return VINF_SUCCESS;
    }

    rtR0MemObjDelete(&pMemHaiku->Core);
    return RTErrConvertFromHaikuKernReturn(rc);
}
int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    return rtR0MemObjNativeLockInMap(ppMem, (void *)R3Ptr, cb, fAccess, R0Process, B_READ_DEVICE);
}

int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    return rtR0MemObjNativeLockInMap(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS, B_READ_DEVICE);
}
#if 0
/** @todo Reserve address space */
/**
* Worker for the two virtual address space reservers.
*
* We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
*/
static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
                                        RTR0PROCESS R0Process)
{
    team_id TeamId = B_SYSTEM_TEAM;

    LogFlowFunc(("ppMem=%p pvFixed=%p cb=%u uAlignment=%u R0Process=%d\n", ppMem, pvFixed, (unsigned)cb, uAlignment, R0Process));

    if (R0Process != NIL_RTR0PROCESS)
        TeamId = (team_id)R0Process;
/* Check that the specified alignment is supported. */
if (uAlignment > PAGE_SIZE)
return VERR_NOT_SUPPORTED;
/* Create the object. */
PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(*pMemHaiku), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
if (!pMemHaiku)
return VERR_NO_MEMORY;
/* Ask the kernel to reserve the address range. */
//XXX: vm_reserve_address_range ?
return VERR_NOT_SUPPORTED;
}
#endif
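
/* A possible shape for the disabled worker above, assuming Haiku's private
   vm_reserve_address_range() export (signature unverified here):
       rc = vm_reserve_address_range(TeamId, &pvFixed, B_EXACT_ADDRESS, cb, 0);
   Until something like that is wired up, both reservation entry points below
   simply report VERR_NOT_SUPPORTED. */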
int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
return VERR_NOT_SUPPORTED;
}
int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
return VERR_NOT_SUPPORTED;
}
int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
    PRTR0MEMOBJHAIKU pMemToMapHaiku = (PRTR0MEMOBJHAIKU)pMemToMap;
    PRTR0MEMOBJHAIKU pMemHaiku;
    area_id area      = -1;
    void   *pvMap     = pvFixed;
    uint32  uAddrSpec = B_EXACT_ADDRESS;
    uint32  fProtect  = 0;
    int rc = VERR_MAP_FAILED;
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
#if 0
/** @todo r=ramshankar: Wrong format specifiers, fix later! */
#endif
/* Check that the specified alignment is supported. */
if (uAlignment > PAGE_SIZE)
return VERR_NOT_SUPPORTED;
/* We can't map anything to the first page, sorry. */
if (pvFixed == 0)
return VERR_NOT_SUPPORTED;
if (fProt & RTMEM_PROT_READ)
if (fProt & RTMEM_PROT_WRITE)
    /*
     * Either the object we map has an area associated with it, which we can
     * clone, or it is a physical address range which we must map.
     */
    if (pMemToMapHaiku->AreaId > -1)
    {
        if (pvFixed == (void *)-1)
            uAddrSpec = B_ANY_KERNEL_ADDRESS;

        rc = area = clone_area("IPRT R0MemObj MapKernel", &pvMap, uAddrSpec, fProtect, pMemToMapHaiku->AreaId);
        LogFlow(("rtR0MemObjNativeMapKernel: clone_area uAddrSpec=%d fProtect=%x AreaId=%d rc=%d\n", uAddrSpec, fProtect,
                 pMemToMapHaiku->AreaId, rc));
    }
    else if (pMemToMapHaiku->Core.enmType == RTR0MEMOBJTYPE_PHYS)
{
/* map_physical_memory() won't let you choose where. */
if (pvFixed != (void *)-1)
return VERR_NOT_SUPPORTED;
        rc = area = map_physical_memory("IPRT R0MemObj MapKernelPhys", (phys_addr_t)pMemToMapHaiku->Core.u.Phys.PhysBase,
                                        pMemToMapHaiku->Core.cb, B_ANY_KERNEL_ADDRESS, fProtect, &pvMap);
}
else
return VERR_NOT_SUPPORTED;
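
    /* Both clone_area() and map_physical_memory() return a new area_id on
       success; negative values are Haiku error codes, mirroring create_area()
       above. */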
    if (rc >= B_OK)
    {
        /* Create the object. */
        pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(RTR0MEMOBJHAIKU), RTR0MEMOBJTYPE_MAPPING, pvMap,
                                                    pMemToMapHaiku->Core.cb);
        if (RT_UNLIKELY(!pMemHaiku))
            return VERR_NO_MEMORY;

        pMemHaiku->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
        pMemHaiku->Core.pv = pvMap;
        pMemHaiku->AreaId  = area;
        *ppMem = &pMemHaiku->Core;
        return VINF_SUCCESS;
    }
    rc = VERR_MAP_FAILED;
/** @todo finish the implementation. */
return rc;
}
int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                            unsigned fProt, RTR0PROCESS R0Process)
{
#if 0
/*
* Check for unsupported stuff.
*/
AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
if (uAlignment > PAGE_SIZE)
return VERR_NOT_SUPPORTED;
    int rc;
    struct proc *pProc = (struct proc *)R0Process;

    /* Calc protection. */
    vm_prot_t ProtectionFlags = VM_PROT_NONE;
    if (fProt & RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if (fProt & RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if (fProt & RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    /* Calc mapping address. */
    vm_offset_t AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));

    /* Insert the object in the map (FreeBSD-style sketch, cf. vm_mmap.c). */
    rc = vm_map_find(&pProc->p_vmspace->vm_map, /* Map to insert the object in */
                     NULL,                      /* Object to map */
                     0,                         /* Start offset in the object */
                     &AddrR3,                   /* Start address IN/OUT */
                     pMemToMap->cb,             /* Size of the mapping */
                     TRUE,                      /* Whether a suitable address should be searched for first */
                     ProtectionFlags,           /* protection flags */
                     VM_PROT_ALL,               /* Maximum protection flags */
                     0);                        /* Copy on write */

    /* Map the memory page by page into the destination map. */
    if (rc == KERN_SUCCESS)
    {
        if (   pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS
            || pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS_NC)
        {
            /* Mapping physical allocations: the pages would be entered into
               the process pmap one by one here. */
        }
        else
        {
            /* Mapping cont or low memory types: likewise, page by page from
               the kernel virtual address. */
        }
    }
    if (rc == KERN_SUCCESS)
    {
        /*
         * Create a mapping object for it.
         */
        PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(RTR0MEMOBJHAIKU), RTR0MEMOBJTYPE_MAPPING,
                                                                     (void *)AddrR3, pMemToMap->cb);
        if (pMemHaiku)
        {
            pMemHaiku->Core.u.Mapping.R0Process = R0Process;
            *ppMem = &pMemHaiku->Core;
            return VINF_SUCCESS;
        }

        vm_map_remove(&pProc->p_vmspace->vm_map, AddrR3, AddrR3 + pMemToMap->cb);
    }
#endif
return VERR_NOT_SUPPORTED;
}
int rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t cbSub, uint32_t fProt)
{
    return VERR_NOT_SUPPORTED;
}
RTHCPHYS rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)pMem;
    status_t rc;

    /** @todo r=ramshankar: Validate objects */

    LogFlow(("rtR0MemObjNativeGetPagePhysAddr: pMem=%p enmType=%x iPage=%u\n", pMem, pMemHaiku->Core.enmType, (unsigned)iPage));

    switch (pMemHaiku->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOCK:
        {
            team_id        TeamId = B_SYSTEM_TEAM;
            physical_entry aPhysMap[2];
            uint32         cPhysMap = 2;

            if (pMemHaiku->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                TeamId = (team_id)pMemHaiku->Core.u.Lock.R0Process;
            const void *pvAddress = (const void *)((addr_t)pMemHaiku->Core.pv + (iPage << PAGE_SHIFT));

            rc = get_memory_map_etc(TeamId, pvAddress, B_PAGE_SIZE, aPhysMap, &cPhysMap);
            if (rc < B_OK || cPhysMap < 1)
                return NIL_RTHCPHYS;

            return aPhysMap[0].address;
        }
#if 0
        case RTR0MEMOBJTYPE_MAPPING:
        {
            /* FreeBSD leftover: would resolve via pmap_extract()/vtophys(). */
            return NIL_RTHCPHYS;
        }
#endif
        case RTR0MEMOBJTYPE_CONT:
            return pMemHaiku->Core.u.Cont.Phys + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemHaiku->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
            /* Area-backed but not necessarily contiguous: query the kernel. */
            team_id        TeamId = B_SYSTEM_TEAM;
            physical_entry aPhysMap[2];
            uint32         cPhysMap = 2;

            const void *pvAddress = (const void *)((addr_t)pMemHaiku->Core.pv + (iPage << PAGE_SHIFT));
            rc = get_memory_map_etc(TeamId, pvAddress, B_PAGE_SIZE, aPhysMap, &cPhysMap);
            if (rc < B_OK || cPhysMap < 1)
                return NIL_RTHCPHYS;

            return aPhysMap[0].address;
        }
case RTR0MEMOBJTYPE_RES_VIRT:
default:
return NIL_RTHCPHYS;
}
}